linux/kernel/trace/trace.c
   1/*
   2 * ring buffer based function tracer
   3 *
   4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
   5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   6 *
   7 * Originally taken from the RT patch by:
   8 *    Arnaldo Carvalho de Melo <acme@redhat.com>
   9 *
  10 * Based on code from the latency_tracer, that is:
  11 *  Copyright (C) 2004-2006 Ingo Molnar
  12 *  Copyright (C) 2004 Nadia Yvette Chambers
  13 */
  14#include <linux/ring_buffer.h>
  15#include <generated/utsrelease.h>
  16#include <linux/stacktrace.h>
  17#include <linux/writeback.h>
  18#include <linux/kallsyms.h>
  19#include <linux/seq_file.h>
  20#include <linux/notifier.h>
  21#include <linux/irqflags.h>
  22#include <linux/debugfs.h>
  23#include <linux/pagemap.h>
  24#include <linux/hardirq.h>
  25#include <linux/linkage.h>
  26#include <linux/uaccess.h>
  27#include <linux/kprobes.h>
  28#include <linux/ftrace.h>
  29#include <linux/module.h>
  30#include <linux/percpu.h>
  31#include <linux/splice.h>
  32#include <linux/kdebug.h>
  33#include <linux/string.h>
  34#include <linux/rwsem.h>
  35#include <linux/slab.h>
  36#include <linux/ctype.h>
  37#include <linux/init.h>
  38#include <linux/poll.h>
  39#include <linux/nmi.h>
  40#include <linux/fs.h>
  41#include <linux/sched/rt.h>
  42
  43#include "trace.h"
  44#include "trace_output.h"
  45
  46/*
  47 * On boot up, the ring buffer is set to the minimum size, so that
  48 * we do not waste memory on systems that are not using tracing.
  49 */
  50bool ring_buffer_expanded;
  51
  52/*
  53 * We need to change this state when a selftest is running.
   54 * A selftest will look into the ring buffer to count the
   55 * entries inserted during the selftest, although concurrent
   56 * insertions into the ring buffer, such as trace_printk, could occur
   57 * at the same time, giving false positive or negative results.
  58 */
  59static bool __read_mostly tracing_selftest_running;
  60
  61/*
  62 * If a tracer is running, we do not want to run SELFTEST.
  63 */
  64bool __read_mostly tracing_selftest_disabled;
  65
  66/* Pipe tracepoints to printk */
  67struct trace_iterator *tracepoint_print_iter;
  68int tracepoint_printk;
  69
  70/* For tracers that don't implement custom flags */
  71static struct tracer_opt dummy_tracer_opt[] = {
  72        { }
  73};
  74
  75static struct tracer_flags dummy_tracer_flags = {
  76        .val = 0,
  77        .opts = dummy_tracer_opt
  78};
  79
  80static int
  81dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  82{
  83        return 0;
  84}
  85
  86/*
  87 * To prevent the comm cache from being overwritten when no
  88 * tracing is active, only save the comm when a trace event
  89 * occurred.
  90 */
  91static DEFINE_PER_CPU(bool, trace_cmdline_save);
  92
  93/*
  94 * Kill all tracing for good (never come back).
   95 * It is initialized to 1 and is set back to zero only if the
   96 * initialization of the tracer is successful; that is the only
   97 * place that clears it.
  98 */
  99static int tracing_disabled = 1;
 100
 101DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 102
 103cpumask_var_t __read_mostly     tracing_buffer_mask;
 104
 105/*
 106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 107 *
 108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 109 * is set, then ftrace_dump is called. This will output the contents
 110 * of the ftrace buffers to the console.  This is very useful for
  111 * capturing traces that lead to crashes and outputting them to a
  112 * serial console.
  113 *
  114 * It is off by default, but you can enable it either by specifying
  115 * "ftrace_dump_on_oops" on the kernel command line, or by setting
  116 * /proc/sys/kernel/ftrace_dump_on_oops.
  117 * Set it to 1 to dump the buffers of all CPUs.
  118 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 119 */
 120
 121enum ftrace_dump_mode ftrace_dump_on_oops;
 122
 123/* When set, tracing will stop when a WARN*() is hit */
 124int __disable_trace_on_warning;
 125
 126static int tracing_set_tracer(struct trace_array *tr, const char *buf);
 127
 128#define MAX_TRACER_SIZE         100
 129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 130static char *default_bootup_tracer;
 131
 132static bool allocate_snapshot;
 133
 134static int __init set_cmdline_ftrace(char *str)
 135{
 136        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 137        default_bootup_tracer = bootup_tracer_buf;
 138        /* We are using ftrace early, expand it */
 139        ring_buffer_expanded = true;
 140        return 1;
 141}
 142__setup("ftrace=", set_cmdline_ftrace);
 143
 144static int __init set_ftrace_dump_on_oops(char *str)
 145{
 146        if (*str++ != '=' || !*str) {
 147                ftrace_dump_on_oops = DUMP_ALL;
 148                return 1;
 149        }
 150
 151        if (!strcmp("orig_cpu", str)) {
 152                ftrace_dump_on_oops = DUMP_ORIG;
 153                return 1;
 154        }
 155
 156        return 0;
 157}
 158__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 159
 160static int __init stop_trace_on_warning(char *str)
 161{
 162        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
 163                __disable_trace_on_warning = 1;
 164        return 1;
 165}
 166__setup("traceoff_on_warning", stop_trace_on_warning);
 167
 168static int __init boot_alloc_snapshot(char *str)
 169{
 170        allocate_snapshot = true;
 171        /* We also need the main ring buffer expanded */
 172        ring_buffer_expanded = true;
 173        return 1;
 174}
 175__setup("alloc_snapshot", boot_alloc_snapshot);
 176
 177
 178static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 179static char *trace_boot_options __initdata;
 180
 181static int __init set_trace_boot_options(char *str)
 182{
 183        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
 184        trace_boot_options = trace_boot_options_buf;
 185        return 0;
 186}
 187__setup("trace_options=", set_trace_boot_options);
 188
 189static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
 190static char *trace_boot_clock __initdata;
 191
 192static int __init set_trace_boot_clock(char *str)
 193{
 194        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
 195        trace_boot_clock = trace_boot_clock_buf;
 196        return 0;
 197}
 198__setup("trace_clock=", set_trace_boot_clock);
 199
 200static int __init set_tracepoint_printk(char *str)
 201{
 202        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
 203                tracepoint_printk = 1;
 204        return 1;
 205}
 206__setup("tp_printk", set_tracepoint_printk);
 207
 208unsigned long long ns2usecs(cycle_t nsec)
 209{
 210        nsec += 500;
 211        do_div(nsec, 1000);
 212        return nsec;
 213}
 214
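/*
 * Illustrative note (not part of the original file): the +500 above
 * rounds to the nearest microsecond instead of truncating, e.g.
 * ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */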
 215/*
 216 * The global_trace is the descriptor that holds the tracing
 217 * buffers for the live tracing. For each CPU, it contains
  218 * a linked list of pages that will store trace entries. The
  219 * page descriptors of those pages are used to hold the
  220 * linked list, by linking the lru item in each page descriptor
  221 * to the other pages of that CPU's buffer.
 222 *
 223 * For each active CPU there is a data field that holds the
 224 * pages for the buffer for that CPU. Each CPU has the same number
 225 * of pages allocated for its buffer.
 226 */
 227static struct trace_array       global_trace;
 228
 229LIST_HEAD(ftrace_trace_arrays);
 230
 231int trace_array_get(struct trace_array *this_tr)
 232{
 233        struct trace_array *tr;
 234        int ret = -ENODEV;
 235
 236        mutex_lock(&trace_types_lock);
 237        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 238                if (tr == this_tr) {
 239                        tr->ref++;
 240                        ret = 0;
 241                        break;
 242                }
 243        }
 244        mutex_unlock(&trace_types_lock);
 245
 246        return ret;
 247}
 248
 249static void __trace_array_put(struct trace_array *this_tr)
 250{
 251        WARN_ON(!this_tr->ref);
 252        this_tr->ref--;
 253}
 254
 255void trace_array_put(struct trace_array *this_tr)
 256{
 257        mutex_lock(&trace_types_lock);
 258        __trace_array_put(this_tr);
 259        mutex_unlock(&trace_types_lock);
 260}
 261
 262int filter_check_discard(struct ftrace_event_file *file, void *rec,
 263                         struct ring_buffer *buffer,
 264                         struct ring_buffer_event *event)
 265{
 266        if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
 267            !filter_match_preds(file->filter, rec)) {
 268                ring_buffer_discard_commit(buffer, event);
 269                return 1;
 270        }
 271
 272        return 0;
 273}
 274EXPORT_SYMBOL_GPL(filter_check_discard);
 275
 276int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
 277                              struct ring_buffer *buffer,
 278                              struct ring_buffer_event *event)
 279{
 280        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
 281            !filter_match_preds(call->filter, rec)) {
 282                ring_buffer_discard_commit(buffer, event);
 283                return 1;
 284        }
 285
 286        return 0;
 287}
 288EXPORT_SYMBOL_GPL(call_filter_check_discard);
 289
 290static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 291{
 292        u64 ts;
 293
 294        /* Early boot up does not have a buffer yet */
 295        if (!buf->buffer)
 296                return trace_clock_local();
 297
 298        ts = ring_buffer_time_stamp(buf->buffer, cpu);
 299        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 300
 301        return ts;
 302}
 303
 304cycle_t ftrace_now(int cpu)
 305{
 306        return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
 307}
 308
 309/**
 310 * tracing_is_enabled - Show if global_trace has been disabled
 311 *
 312 * Shows if the global trace has been enabled or not. It uses the
  313 * mirror flag "buffer_disabled", which can be read in fast paths such
  314 * as by the irqsoff tracer. But it may be inaccurate due to races. If you
 315 * need to know the accurate state, use tracing_is_on() which is a little
 316 * slower, but accurate.
 317 */
 318int tracing_is_enabled(void)
 319{
 320        /*
 321         * For quick access (irqsoff uses this in fast path), just
 322         * return the mirror variable of the state of the ring buffer.
 323         * It's a little racy, but we don't really care.
 324         */
 325        smp_rmb();
 326        return !global_trace.buffer_disabled;
 327}
 328
 329/*
 330 * trace_buf_size is the size in bytes that is allocated
 331 * for a buffer. Note, the number of bytes is always rounded
 332 * to page size.
 333 *
  334 * This number is purposely set to a low value of 16384 entries.
  335 * If a dump on oops happens, it is much appreciated not to have
  336 * to wait for all that output. Anyway, this is configurable at
  337 * boot time and at run time.
 338 */
 339#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
 340
 341static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 342
  343/* trace_types holds a linked list of available tracers. */
 344static struct tracer            *trace_types __read_mostly;
 345
 346/*
 347 * trace_types_lock is used to protect the trace_types list.
 348 */
 349DEFINE_MUTEX(trace_types_lock);
 350
 351/*
  352 * Serialize access to the ring buffer.
  353 *
  354 * The ring buffer serializes readers, but that is only low-level protection.
  355 * The validity of the events (returned by ring_buffer_peek(), etc.)
  356 * is not protected by the ring buffer.
  357 *
  358 * The content of events may become garbage if we allow other processes to
  359 * consume these events concurrently:
  360 *   A) the page of the consumed events may become a normal page
  361 *      (not a reader page) in the ring buffer, and this page will be
  362 *      rewritten by the event producer.
  363 *   B) the page of the consumed events may become a page used for
  364 *      splice_read, and this page will be returned to the system.
  365 *
  366 * These primitives allow multiple processes to access different per-CPU
  367 * ring buffers concurrently.
  368 *
  369 * These primitives don't distinguish read-only from read-consume access.
  370 * Multiple read-only accesses are also serialized.
 371 */
 372
 373#ifdef CONFIG_SMP
 374static DECLARE_RWSEM(all_cpu_access_lock);
 375static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
 376
 377static inline void trace_access_lock(int cpu)
 378{
 379        if (cpu == RING_BUFFER_ALL_CPUS) {
 380                /* gain it for accessing the whole ring buffer. */
 381                down_write(&all_cpu_access_lock);
 382        } else {
 383                /* gain it for accessing a cpu ring buffer. */
 384
 385                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
 386                down_read(&all_cpu_access_lock);
 387
 388                /* Secondly block other access to this @cpu ring buffer. */
 389                mutex_lock(&per_cpu(cpu_access_lock, cpu));
 390        }
 391}
 392
 393static inline void trace_access_unlock(int cpu)
 394{
 395        if (cpu == RING_BUFFER_ALL_CPUS) {
 396                up_write(&all_cpu_access_lock);
 397        } else {
 398                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
 399                up_read(&all_cpu_access_lock);
 400        }
 401}
 402
 403static inline void trace_access_lock_init(void)
 404{
 405        int cpu;
 406
 407        for_each_possible_cpu(cpu)
 408                mutex_init(&per_cpu(cpu_access_lock, cpu));
 409}
 410
 411#else
 412
 413static DEFINE_MUTEX(access_lock);
 414
 415static inline void trace_access_lock(int cpu)
 416{
 417        (void)cpu;
 418        mutex_lock(&access_lock);
 419}
 420
 421static inline void trace_access_unlock(int cpu)
 422{
 423        (void)cpu;
 424        mutex_unlock(&access_lock);
 425}
 426
 427static inline void trace_access_lock_init(void)
 428{
 429}
 430
 431#endif
 432
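/*
 * Usage sketch (illustrative, not from the original source): a reader of
 * a single CPU buffer brackets its access with these helpers,
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	...
 *	trace_access_unlock(cpu);
 *
 * while a whole-buffer operation passes RING_BUFFER_ALL_CPUS and thereby
 * excludes every per-cpu reader at once.
 */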
 433/* trace_flags holds trace_options default values */
 434unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 435        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
 436        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
 437        TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
 438
 439static void tracer_tracing_on(struct trace_array *tr)
 440{
 441        if (tr->trace_buffer.buffer)
 442                ring_buffer_record_on(tr->trace_buffer.buffer);
 443        /*
 444         * This flag is looked at when buffers haven't been allocated
 445         * yet, or by some tracers (like irqsoff), that just want to
 446         * know if the ring buffer has been disabled, but it can handle
  447         * races where it gets disabled but we still do a record.
 448         * As the check is in the fast path of the tracers, it is more
 449         * important to be fast than accurate.
 450         */
 451        tr->buffer_disabled = 0;
 452        /* Make the flag seen by readers */
 453        smp_wmb();
 454}
 455
 456/**
 457 * tracing_on - enable tracing buffers
 458 *
 459 * This function enables tracing buffers that may have been
 460 * disabled with tracing_off.
 461 */
 462void tracing_on(void)
 463{
 464        tracer_tracing_on(&global_trace);
 465}
 466EXPORT_SYMBOL_GPL(tracing_on);
 467
 468/**
 469 * __trace_puts - write a constant string into the trace buffer.
 470 * @ip:    The address of the caller
 471 * @str:   The constant string to write
 472 * @size:  The size of the string.
 473 */
 474int __trace_puts(unsigned long ip, const char *str, int size)
 475{
 476        struct ring_buffer_event *event;
 477        struct ring_buffer *buffer;
 478        struct print_entry *entry;
 479        unsigned long irq_flags;
 480        int alloc;
 481        int pc;
 482
 483        if (!(trace_flags & TRACE_ITER_PRINTK))
 484                return 0;
 485
 486        pc = preempt_count();
 487
 488        if (unlikely(tracing_selftest_running || tracing_disabled))
 489                return 0;
 490
 491        alloc = sizeof(*entry) + size + 2; /* possible \n added */
 492
 493        local_save_flags(irq_flags);
 494        buffer = global_trace.trace_buffer.buffer;
 495        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
 496                                          irq_flags, pc);
 497        if (!event)
 498                return 0;
 499
 500        entry = ring_buffer_event_data(event);
 501        entry->ip = ip;
 502
 503        memcpy(&entry->buf, str, size);
 504
 505        /* Add a newline if necessary */
 506        if (entry->buf[size - 1] != '\n') {
 507                entry->buf[size] = '\n';
 508                entry->buf[size + 1] = '\0';
 509        } else
 510                entry->buf[size] = '\0';
 511
 512        __buffer_unlock_commit(buffer, event);
 513        ftrace_trace_stack(buffer, irq_flags, 4, pc);
 514
 515        return size;
 516}
 517EXPORT_SYMBOL_GPL(__trace_puts);
 518
 519/**
 520 * __trace_bputs - write the pointer to a constant string into trace buffer
 521 * @ip:    The address of the caller
  522 * @str:   The constant string to write to the buffer
 523 */
 524int __trace_bputs(unsigned long ip, const char *str)
 525{
 526        struct ring_buffer_event *event;
 527        struct ring_buffer *buffer;
 528        struct bputs_entry *entry;
 529        unsigned long irq_flags;
 530        int size = sizeof(struct bputs_entry);
 531        int pc;
 532
 533        if (!(trace_flags & TRACE_ITER_PRINTK))
 534                return 0;
 535
 536        pc = preempt_count();
 537
 538        if (unlikely(tracing_selftest_running || tracing_disabled))
 539                return 0;
 540
 541        local_save_flags(irq_flags);
 542        buffer = global_trace.trace_buffer.buffer;
 543        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 544                                          irq_flags, pc);
 545        if (!event)
 546                return 0;
 547
 548        entry = ring_buffer_event_data(event);
 549        entry->ip                       = ip;
 550        entry->str                      = str;
 551
 552        __buffer_unlock_commit(buffer, event);
 553        ftrace_trace_stack(buffer, irq_flags, 4, pc);
 554
 555        return 1;
 556}
 557EXPORT_SYMBOL_GPL(__trace_bputs);
 558
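/*
 * Illustrative note (not part of the original file): callers normally
 * reach the two helpers above through the trace_puts() macro, which uses
 * __trace_bputs() for compile-time constant strings (only the string's
 * address is recorded) and falls back to __trace_puts() otherwise, e.g.
 *
 *	trace_puts("reached the slow path\n");
 */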
 559#ifdef CONFIG_TRACER_SNAPSHOT
 560/**
  561 * tracing_snapshot - take a snapshot of the current buffer.
 562 *
 563 * This causes a swap between the snapshot buffer and the current live
 564 * tracing buffer. You can use this to take snapshots of the live
 565 * trace when some condition is triggered, but continue to trace.
 566 *
  567 * Note, make sure to allocate the snapshot either with
  568 * tracing_snapshot_alloc(), or by doing it manually with:
  569 * echo 1 > /sys/kernel/debug/tracing/snapshot
  570 *
  571 * If the snapshot buffer is not allocated, this will stop tracing,
  572 * basically making a permanent snapshot.
 573 */
 574void tracing_snapshot(void)
 575{
 576        struct trace_array *tr = &global_trace;
 577        struct tracer *tracer = tr->current_trace;
 578        unsigned long flags;
 579
 580        if (in_nmi()) {
 581                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
 582                internal_trace_puts("*** snapshot is being ignored        ***\n");
 583                return;
 584        }
 585
 586        if (!tr->allocated_snapshot) {
 587                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
 588                internal_trace_puts("*** stopping trace here!   ***\n");
 589                tracing_off();
 590                return;
 591        }
 592
 593        /* Note, snapshot can not be used when the tracer uses it */
 594        if (tracer->use_max_tr) {
 595                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
 596                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
 597                return;
 598        }
 599
 600        local_irq_save(flags);
 601        update_max_tr(tr, current, smp_processor_id());
 602        local_irq_restore(flags);
 603}
 604EXPORT_SYMBOL_GPL(tracing_snapshot);
 605
 606static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
 607                                        struct trace_buffer *size_buf, int cpu_id);
 608static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
 609
 610static int alloc_snapshot(struct trace_array *tr)
 611{
 612        int ret;
 613
 614        if (!tr->allocated_snapshot) {
 615
 616                /* allocate spare buffer */
 617                ret = resize_buffer_duplicate_size(&tr->max_buffer,
 618                                   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
 619                if (ret < 0)
 620                        return ret;
 621
 622                tr->allocated_snapshot = true;
 623        }
 624
 625        return 0;
 626}
 627
 628static void free_snapshot(struct trace_array *tr)
 629{
 630        /*
  631         * We don't free the ring buffer; instead, we resize it because
  632         * the max_tr ring buffer has some state (e.g. ring->clock) and
  633         * we want to preserve it.
 634         */
 635        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
 636        set_buffer_entries(&tr->max_buffer, 1);
 637        tracing_reset_online_cpus(&tr->max_buffer);
 638        tr->allocated_snapshot = false;
 639}
 640
 641/**
 642 * tracing_alloc_snapshot - allocate snapshot buffer.
 643 *
 644 * This only allocates the snapshot buffer if it isn't already
 645 * allocated - it doesn't also take a snapshot.
 646 *
 647 * This is meant to be used in cases where the snapshot buffer needs
 648 * to be set up for events that can't sleep but need to be able to
 649 * trigger a snapshot.
 650 */
 651int tracing_alloc_snapshot(void)
 652{
 653        struct trace_array *tr = &global_trace;
 654        int ret;
 655
 656        ret = alloc_snapshot(tr);
 657        WARN_ON(ret < 0);
 658
 659        return ret;
 660}
 661EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 662
 663/**
  664 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
  665 *
  666 * This is similar to tracing_snapshot(), but it will allocate the
 667 * snapshot buffer if it isn't already allocated. Use this only
 668 * where it is safe to sleep, as the allocation may sleep.
 669 *
 670 * This causes a swap between the snapshot buffer and the current live
 671 * tracing buffer. You can use this to take snapshots of the live
 672 * trace when some condition is triggered, but continue to trace.
 673 */
 674void tracing_snapshot_alloc(void)
 675{
 676        int ret;
 677
 678        ret = tracing_alloc_snapshot();
 679        if (ret < 0)
 680                return;
 681
 682        tracing_snapshot();
 683}
 684EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 685#else
 686void tracing_snapshot(void)
 687{
 688        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
 689}
 690EXPORT_SYMBOL_GPL(tracing_snapshot);
 691int tracing_alloc_snapshot(void)
 692{
 693        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
 694        return -ENODEV;
 695}
 696EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 697void tracing_snapshot_alloc(void)
 698{
 699        /* Give warning */
 700        tracing_snapshot();
 701}
 702EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 703#endif /* CONFIG_TRACER_SNAPSHOT */
 704
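/*
 * Usage sketch (illustrative, not part of the original file): kernel code
 * that wants to freeze a copy of the trace at an interesting moment,
 * without stopping tracing, could do
 *
 *	tracing_snapshot_alloc();	// may sleep; call once, early
 *	...
 *	if (suspicious_condition)	// hypothetical condition
 *		tracing_snapshot();	// atomic-safe, but not from NMI
 *
 * and later read the result from the tracing/snapshot file in debugfs.
 */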
 705static void tracer_tracing_off(struct trace_array *tr)
 706{
 707        if (tr->trace_buffer.buffer)
 708                ring_buffer_record_off(tr->trace_buffer.buffer);
 709        /*
 710         * This flag is looked at when buffers haven't been allocated
 711         * yet, or by some tracers (like irqsoff), that just want to
 712         * know if the ring buffer has been disabled, but it can handle
  713         * races where it gets disabled but we still do a record.
 714         * As the check is in the fast path of the tracers, it is more
 715         * important to be fast than accurate.
 716         */
 717        tr->buffer_disabled = 1;
 718        /* Make the flag seen by readers */
 719        smp_wmb();
 720}
 721
 722/**
 723 * tracing_off - turn off tracing buffers
 724 *
 725 * This function stops the tracing buffers from recording data.
 726 * It does not disable any overhead the tracers themselves may
 727 * be causing. This function simply causes all recording to
 728 * the ring buffers to fail.
 729 */
 730void tracing_off(void)
 731{
 732        tracer_tracing_off(&global_trace);
 733}
 734EXPORT_SYMBOL_GPL(tracing_off);
 735
 736void disable_trace_on_warning(void)
 737{
 738        if (__disable_trace_on_warning)
 739                tracing_off();
 740}
 741
 742/**
  743 * tracer_tracing_is_on - show the real state of the ring buffer
  744 * @tr: the trace array to check if its ring buffer is enabled
  745 *
  746 * Shows the real state of the ring buffer: whether it is enabled or not.
 747 */
 748static int tracer_tracing_is_on(struct trace_array *tr)
 749{
 750        if (tr->trace_buffer.buffer)
 751                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
 752        return !tr->buffer_disabled;
 753}
 754
 755/**
 756 * tracing_is_on - show state of ring buffers enabled
 757 */
 758int tracing_is_on(void)
 759{
 760        return tracer_tracing_is_on(&global_trace);
 761}
 762EXPORT_SYMBOL_GPL(tracing_is_on);
 763
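/*
 * Usage sketch (illustrative, not part of the original file): since
 * tracing_on(), tracing_off() and tracing_is_on() are exported, a module
 * can freeze the ring buffer the moment it detects a problem, preserving
 * the events that led up to it, e.g.
 *
 *	if (status & ERROR_BIT) {	// hypothetical error check
 *		trace_printk("error 0x%x, stopping trace\n", status);
 *		tracing_off();
 *	}
 */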
 764static int __init set_buf_size(char *str)
 765{
 766        unsigned long buf_size;
 767
 768        if (!str)
 769                return 0;
 770        buf_size = memparse(str, &str);
 771        /* nr_entries can not be zero */
 772        if (buf_size == 0)
 773                return 0;
 774        trace_buf_size = buf_size;
 775        return 1;
 776}
 777__setup("trace_buf_size=", set_buf_size);
 778
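/*
 * Illustrative note (not part of the original file): memparse() accepts
 * the usual K/M/G suffixes, so booting with, for example,
 * "trace_buf_size=4m" requests a 4 MiB per-cpu buffer (rounded to page
 * size once the buffer is actually allocated).
 */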
 779static int __init set_tracing_thresh(char *str)
 780{
 781        unsigned long threshold;
 782        int ret;
 783
 784        if (!str)
 785                return 0;
 786        ret = kstrtoul(str, 0, &threshold);
 787        if (ret < 0)
 788                return 0;
 789        tracing_thresh = threshold * 1000;
 790        return 1;
 791}
 792__setup("tracing_thresh=", set_tracing_thresh);
 793
 794unsigned long nsecs_to_usecs(unsigned long nsecs)
 795{
 796        return nsecs / 1000;
 797}
 798
  799/* These must match the bit positions in trace_iterator_flags */
 800static const char *trace_options[] = {
 801        "print-parent",
 802        "sym-offset",
 803        "sym-addr",
 804        "verbose",
 805        "raw",
 806        "hex",
 807        "bin",
 808        "block",
 809        "stacktrace",
 810        "trace_printk",
 811        "ftrace_preempt",
 812        "branch",
 813        "annotate",
 814        "userstacktrace",
 815        "sym-userobj",
 816        "printk-msg-only",
 817        "context-info",
 818        "latency-format",
 819        "sleep-time",
 820        "graph-time",
 821        "record-cmd",
 822        "overwrite",
 823        "disable_on_free",
 824        "irq-info",
 825        "markers",
 826        "function-trace",
 827        NULL
 828};
 829
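/*
 * Illustrative note (not part of the original file): these strings are
 * the names shown in the tracing/trace_options file (and as individual
 * files under tracing/options/); writing "name" sets an option and
 * "noname" clears it, e.g.
 *
 *	echo nooverwrite > /sys/kernel/debug/tracing/trace_options
 */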
 830static struct {
 831        u64 (*func)(void);
 832        const char *name;
 833        int in_ns;              /* is this clock in nanoseconds? */
 834} trace_clocks[] = {
 835        { trace_clock_local,            "local",        1 },
 836        { trace_clock_global,           "global",       1 },
 837        { trace_clock_counter,          "counter",      0 },
 838        { trace_clock_jiffies,          "uptime",       0 },
 839        { trace_clock,                  "perf",         1 },
 840        { ktime_get_mono_fast_ns,       "mono",         1 },
 841        ARCH_TRACE_CLOCKS
 842};
 843
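/*
 * Illustrative note (not part of the original file): the names in
 * trace_clocks[] are what the tracing/trace_clock file lists and what the
 * "trace_clock=" boot parameter above accepts, e.g.
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */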
 844/*
 845 * trace_parser_get_init - gets the buffer for trace parser
 846 */
 847int trace_parser_get_init(struct trace_parser *parser, int size)
 848{
 849        memset(parser, 0, sizeof(*parser));
 850
 851        parser->buffer = kmalloc(size, GFP_KERNEL);
 852        if (!parser->buffer)
 853                return 1;
 854
 855        parser->size = size;
 856        return 0;
 857}
 858
 859/*
 860 * trace_parser_put - frees the buffer for trace parser
 861 */
 862void trace_parser_put(struct trace_parser *parser)
 863{
 864        kfree(parser->buffer);
 865}
 866
 867/*
  868 * trace_get_user - reads the user input string separated by space
 869 * (matched by isspace(ch))
 870 *
  871 * For each string found, the 'struct trace_parser' is updated,
 872 * and the function returns.
 873 *
 874 * Returns number of bytes read.
 875 *
 876 * See kernel/trace/trace.h for 'struct trace_parser' details.
 877 */
 878int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 879        size_t cnt, loff_t *ppos)
 880{
 881        char ch;
 882        size_t read = 0;
 883        ssize_t ret;
 884
 885        if (!*ppos)
 886                trace_parser_clear(parser);
 887
 888        ret = get_user(ch, ubuf++);
 889        if (ret)
 890                goto out;
 891
 892        read++;
 893        cnt--;
 894
 895        /*
  896         * If the parser did not finish with the last write,
  897         * continue reading the user input without skipping spaces.
 898         */
 899        if (!parser->cont) {
 900                /* skip white space */
 901                while (cnt && isspace(ch)) {
 902                        ret = get_user(ch, ubuf++);
 903                        if (ret)
 904                                goto out;
 905                        read++;
 906                        cnt--;
 907                }
 908
 909                /* only spaces were written */
 910                if (isspace(ch)) {
 911                        *ppos += read;
 912                        ret = read;
 913                        goto out;
 914                }
 915
 916                parser->idx = 0;
 917        }
 918
 919        /* read the non-space input */
 920        while (cnt && !isspace(ch)) {
 921                if (parser->idx < parser->size - 1)
 922                        parser->buffer[parser->idx++] = ch;
 923                else {
 924                        ret = -EINVAL;
 925                        goto out;
 926                }
 927                ret = get_user(ch, ubuf++);
 928                if (ret)
 929                        goto out;
 930                read++;
 931                cnt--;
 932        }
 933
 934        /* We either got finished input or we have to wait for another call. */
 935        if (isspace(ch)) {
 936                parser->buffer[parser->idx] = 0;
 937                parser->cont = false;
 938        } else if (parser->idx < parser->size - 1) {
 939                parser->cont = true;
 940                parser->buffer[parser->idx++] = ch;
 941        } else {
 942                ret = -EINVAL;
 943                goto out;
 944        }
 945
 946        *ppos += read;
 947        ret = read;
 948
 949out:
 950        return ret;
 951}
 952
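/*
 * Usage sketch (illustrative, not from the original source): the write
 * handlers of the ftrace control files use the parser roughly as
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, size))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		handle_token(parser.buffer);	// hypothetical consumer
 *	trace_parser_put(&parser);
 */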
 953/* TODO add a seq_buf_to_buffer() */
 954static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 955{
 956        int len;
 957
 958        if (trace_seq_used(s) <= s->seq.readpos)
 959                return -EBUSY;
 960
 961        len = trace_seq_used(s) - s->seq.readpos;
 962        if (cnt > len)
 963                cnt = len;
 964        memcpy(buf, s->buffer + s->seq.readpos, cnt);
 965
 966        s->seq.readpos += cnt;
 967        return cnt;
 968}
 969
 970unsigned long __read_mostly     tracing_thresh;
 971
 972#ifdef CONFIG_TRACER_MAX_TRACE
 973/*
 974 * Copy the new maximum trace into the separate maximum-trace
 975 * structure. (this way the maximum trace is permanently saved,
 976 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 977 */
 978static void
 979__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 980{
 981        struct trace_buffer *trace_buf = &tr->trace_buffer;
 982        struct trace_buffer *max_buf = &tr->max_buffer;
 983        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
 984        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 985
 986        max_buf->cpu = cpu;
 987        max_buf->time_start = data->preempt_timestamp;
 988
 989        max_data->saved_latency = tr->max_latency;
 990        max_data->critical_start = data->critical_start;
 991        max_data->critical_end = data->critical_end;
 992
 993        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 994        max_data->pid = tsk->pid;
 995        /*
 996         * If tsk == current, then use current_uid(), as that does not use
 997         * RCU. The irq tracer can be called out of RCU scope.
 998         */
 999        if (tsk == current)
1000                max_data->uid = current_uid();
1001        else
1002                max_data->uid = task_uid(tsk);
1003
1004        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1005        max_data->policy = tsk->policy;
1006        max_data->rt_priority = tsk->rt_priority;
1007
 1008        /* record this task's comm */
1009        tracing_record_cmdline(tsk);
1010}
1011
1012/**
1013 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1014 * @tr: tracer
1015 * @tsk: the task with the latency
1016 * @cpu: The cpu that initiated the trace.
1017 *
1018 * Flip the buffers between the @tr and the max_tr and record information
1019 * about which task was the cause of this latency.
1020 */
1021void
1022update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1023{
1024        struct ring_buffer *buf;
1025
1026        if (tr->stop_count)
1027                return;
1028
1029        WARN_ON_ONCE(!irqs_disabled());
1030
1031        if (!tr->allocated_snapshot) {
1032                /* Only the nop tracer should hit this when disabling */
1033                WARN_ON_ONCE(tr->current_trace != &nop_trace);
1034                return;
1035        }
1036
1037        arch_spin_lock(&tr->max_lock);
1038
1039        buf = tr->trace_buffer.buffer;
1040        tr->trace_buffer.buffer = tr->max_buffer.buffer;
1041        tr->max_buffer.buffer = buf;
1042
1043        __update_max_tr(tr, tsk, cpu);
1044        arch_spin_unlock(&tr->max_lock);
1045}
1046
1047/**
1048 * update_max_tr_single - only copy one trace over, and reset the rest
1049 * @tr - tracer
1050 * @tsk - task with the latency
1051 * @cpu - the cpu of the buffer to copy.
1052 *
1053 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1054 */
1055void
1056update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1057{
1058        int ret;
1059
1060        if (tr->stop_count)
1061                return;
1062
1063        WARN_ON_ONCE(!irqs_disabled());
1064        if (!tr->allocated_snapshot) {
1065                /* Only the nop tracer should hit this when disabling */
1066                WARN_ON_ONCE(tr->current_trace != &nop_trace);
1067                return;
1068        }
1069
1070        arch_spin_lock(&tr->max_lock);
1071
1072        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1073
1074        if (ret == -EBUSY) {
1075                /*
1076                 * We failed to swap the buffer due to a commit taking
1077                 * place on this CPU. We fail to record, but we reset
1078                 * the max trace buffer (no one writes directly to it)
1079                 * and flag that it failed.
1080                 */
1081                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1082                        "Failed to swap buffers due to commit in progress\n");
1083        }
1084
1085        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1086
1087        __update_max_tr(tr, tsk, cpu);
1088        arch_spin_unlock(&tr->max_lock);
1089}
1090#endif /* CONFIG_TRACER_MAX_TRACE */
1091
1092static int wait_on_pipe(struct trace_iterator *iter, bool full)
1093{
1094        /* Iterators are static, they should be filled or empty */
1095        if (trace_buffer_iter(iter, iter->cpu_file))
1096                return 0;
1097
1098        return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1099                                full);
1100}
1101
1102#ifdef CONFIG_FTRACE_STARTUP_TEST
1103static int run_tracer_selftest(struct tracer *type)
1104{
1105        struct trace_array *tr = &global_trace;
1106        struct tracer *saved_tracer = tr->current_trace;
1107        int ret;
1108
1109        if (!type->selftest || tracing_selftest_disabled)
1110                return 0;
1111
1112        /*
1113         * Run a selftest on this tracer.
1114         * Here we reset the trace buffer, and set the current
1115         * tracer to be this tracer. The tracer can then run some
1116         * internal tracing to verify that everything is in order.
1117         * If we fail, we do not register this tracer.
1118         */
1119        tracing_reset_online_cpus(&tr->trace_buffer);
1120
1121        tr->current_trace = type;
1122
1123#ifdef CONFIG_TRACER_MAX_TRACE
1124        if (type->use_max_tr) {
1125                /* If we expanded the buffers, make sure the max is expanded too */
1126                if (ring_buffer_expanded)
1127                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1128                                           RING_BUFFER_ALL_CPUS);
1129                tr->allocated_snapshot = true;
1130        }
1131#endif
1132
1133        /* the test is responsible for initializing and enabling */
1134        pr_info("Testing tracer %s: ", type->name);
1135        ret = type->selftest(type, tr);
1136        /* the test is responsible for resetting too */
1137        tr->current_trace = saved_tracer;
1138        if (ret) {
1139                printk(KERN_CONT "FAILED!\n");
1140                /* Add the warning after printing 'FAILED' */
1141                WARN_ON(1);
1142                return -1;
1143        }
1144        /* Only reset on passing, to avoid touching corrupted buffers */
1145        tracing_reset_online_cpus(&tr->trace_buffer);
1146
1147#ifdef CONFIG_TRACER_MAX_TRACE
1148        if (type->use_max_tr) {
1149                tr->allocated_snapshot = false;
1150
1151                /* Shrink the max buffer again */
1152                if (ring_buffer_expanded)
1153                        ring_buffer_resize(tr->max_buffer.buffer, 1,
1154                                           RING_BUFFER_ALL_CPUS);
1155        }
1156#endif
1157
1158        printk(KERN_CONT "PASSED\n");
1159        return 0;
1160}
1161#else
1162static inline int run_tracer_selftest(struct tracer *type)
1163{
1164        return 0;
1165}
1166#endif /* CONFIG_FTRACE_STARTUP_TEST */
1167
1168/**
1169 * register_tracer - register a tracer with the ftrace system.
1170 * @type - the plugin for the tracer
1171 *
1172 * Register a new plugin tracer.
1173 */
1174int register_tracer(struct tracer *type)
1175{
1176        struct tracer *t;
1177        int ret = 0;
1178
1179        if (!type->name) {
1180                pr_info("Tracer must have a name\n");
1181                return -1;
1182        }
1183
1184        if (strlen(type->name) >= MAX_TRACER_SIZE) {
1185                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1186                return -1;
1187        }
1188
1189        mutex_lock(&trace_types_lock);
1190
1191        tracing_selftest_running = true;
1192
1193        for (t = trace_types; t; t = t->next) {
1194                if (strcmp(type->name, t->name) == 0) {
1195                        /* already found */
1196                        pr_info("Tracer %s already registered\n",
1197                                type->name);
1198                        ret = -1;
1199                        goto out;
1200                }
1201        }
1202
1203        if (!type->set_flag)
1204                type->set_flag = &dummy_set_flag;
1205        if (!type->flags)
1206                type->flags = &dummy_tracer_flags;
1207        else
1208                if (!type->flags->opts)
1209                        type->flags->opts = dummy_tracer_opt;
1210
1211        ret = run_tracer_selftest(type);
1212        if (ret < 0)
1213                goto out;
1214
1215        type->next = trace_types;
1216        trace_types = type;
1217
1218 out:
1219        tracing_selftest_running = false;
1220        mutex_unlock(&trace_types_lock);
1221
1222        if (ret || !default_bootup_tracer)
1223                goto out_unlock;
1224
1225        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1226                goto out_unlock;
1227
1228        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1229        /* Do we want this tracer to start on bootup? */
1230        tracing_set_tracer(&global_trace, type->name);
1231        default_bootup_tracer = NULL;
 1232        /* disable other selftests, since this will break them. */
1233        tracing_selftest_disabled = true;
1234#ifdef CONFIG_FTRACE_STARTUP_TEST
1235        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1236               type->name);
1237#endif
1238
1239 out_unlock:
1240        return ret;
1241}
1242
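/*
 * Usage sketch (illustrative, not part of the original file): a tracer
 * plugin registers itself from an initcall, much like the nop tracer,
 *
 *	static struct tracer mytrace __read_mostly = {	// hypothetical
 *		.name	= "mytrace",
 *		.init	= mytrace_init,
 *		.reset	= mytrace_reset,
 *	};
 *
 *	static __init int init_mytrace(void)
 *	{
 *		return register_tracer(&mytrace);
 *	}
 *	core_initcall(init_mytrace);
 */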
1243void tracing_reset(struct trace_buffer *buf, int cpu)
1244{
1245        struct ring_buffer *buffer = buf->buffer;
1246
1247        if (!buffer)
1248                return;
1249
1250        ring_buffer_record_disable(buffer);
1251
1252        /* Make sure all commits have finished */
1253        synchronize_sched();
1254        ring_buffer_reset_cpu(buffer, cpu);
1255
1256        ring_buffer_record_enable(buffer);
1257}
1258
1259void tracing_reset_online_cpus(struct trace_buffer *buf)
1260{
1261        struct ring_buffer *buffer = buf->buffer;
1262        int cpu;
1263
1264        if (!buffer)
1265                return;
1266
1267        ring_buffer_record_disable(buffer);
1268
1269        /* Make sure all commits have finished */
1270        synchronize_sched();
1271
1272        buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1273
1274        for_each_online_cpu(cpu)
1275                ring_buffer_reset_cpu(buffer, cpu);
1276
1277        ring_buffer_record_enable(buffer);
1278}
1279
1280/* Must have trace_types_lock held */
1281void tracing_reset_all_online_cpus(void)
1282{
1283        struct trace_array *tr;
1284
1285        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1286                tracing_reset_online_cpus(&tr->trace_buffer);
1287#ifdef CONFIG_TRACER_MAX_TRACE
1288                tracing_reset_online_cpus(&tr->max_buffer);
1289#endif
1290        }
1291}
1292
1293#define SAVED_CMDLINES_DEFAULT 128
1294#define NO_CMDLINE_MAP UINT_MAX
1295static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1296struct saved_cmdlines_buffer {
1297        unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1298        unsigned *map_cmdline_to_pid;
1299        unsigned cmdline_num;
1300        int cmdline_idx;
1301        char *saved_cmdlines;
1302};
1303static struct saved_cmdlines_buffer *savedcmd;
1304
 1305/* temporarily disable recording */
1306static atomic_t trace_record_cmdline_disabled __read_mostly;
1307
1308static inline char *get_saved_cmdlines(int idx)
1309{
1310        return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1311}
1312
1313static inline void set_cmdline(int idx, const char *cmdline)
1314{
1315        memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1316}
1317
1318static int allocate_cmdlines_buffer(unsigned int val,
1319                                    struct saved_cmdlines_buffer *s)
1320{
1321        s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1322                                        GFP_KERNEL);
1323        if (!s->map_cmdline_to_pid)
1324                return -ENOMEM;
1325
1326        s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1327        if (!s->saved_cmdlines) {
1328                kfree(s->map_cmdline_to_pid);
1329                return -ENOMEM;
1330        }
1331
1332        s->cmdline_idx = 0;
1333        s->cmdline_num = val;
1334        memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1335               sizeof(s->map_pid_to_cmdline));
1336        memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1337               val * sizeof(*s->map_cmdline_to_pid));
1338
1339        return 0;
1340}
1341
1342static int trace_create_savedcmd(void)
1343{
1344        int ret;
1345
1346        savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1347        if (!savedcmd)
1348                return -ENOMEM;
1349
1350        ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1351        if (ret < 0) {
1352                kfree(savedcmd);
1353                savedcmd = NULL;
1354                return -ENOMEM;
1355        }
1356
1357        return 0;
1358}
1359
1360int is_tracing_stopped(void)
1361{
1362        return global_trace.stop_count;
1363}
1364
1365/**
1366 * tracing_start - quick start of the tracer
1367 *
1368 * If tracing is enabled but was stopped by tracing_stop,
1369 * this will start the tracer back up.
1370 */
1371void tracing_start(void)
1372{
1373        struct ring_buffer *buffer;
1374        unsigned long flags;
1375
1376        if (tracing_disabled)
1377                return;
1378
1379        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1380        if (--global_trace.stop_count) {
1381                if (global_trace.stop_count < 0) {
1382                        /* Someone screwed up their debugging */
1383                        WARN_ON_ONCE(1);
1384                        global_trace.stop_count = 0;
1385                }
1386                goto out;
1387        }
1388
1389        /* Prevent the buffers from switching */
1390        arch_spin_lock(&global_trace.max_lock);
1391
1392        buffer = global_trace.trace_buffer.buffer;
1393        if (buffer)
1394                ring_buffer_record_enable(buffer);
1395
1396#ifdef CONFIG_TRACER_MAX_TRACE
1397        buffer = global_trace.max_buffer.buffer;
1398        if (buffer)
1399                ring_buffer_record_enable(buffer);
1400#endif
1401
1402        arch_spin_unlock(&global_trace.max_lock);
1403
1404 out:
1405        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1406}
1407
1408static void tracing_start_tr(struct trace_array *tr)
1409{
1410        struct ring_buffer *buffer;
1411        unsigned long flags;
1412
1413        if (tracing_disabled)
1414                return;
1415
1416        /* If global, we need to also start the max tracer */
1417        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1418                return tracing_start();
1419
1420        raw_spin_lock_irqsave(&tr->start_lock, flags);
1421
1422        if (--tr->stop_count) {
1423                if (tr->stop_count < 0) {
1424                        /* Someone screwed up their debugging */
1425                        WARN_ON_ONCE(1);
1426                        tr->stop_count = 0;
1427                }
1428                goto out;
1429        }
1430
1431        buffer = tr->trace_buffer.buffer;
1432        if (buffer)
1433                ring_buffer_record_enable(buffer);
1434
1435 out:
1436        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1437}
1438
1439/**
1440 * tracing_stop - quick stop of the tracer
1441 *
1442 * Light weight way to stop tracing. Use in conjunction with
1443 * tracing_start.
1444 */
1445void tracing_stop(void)
1446{
1447        struct ring_buffer *buffer;
1448        unsigned long flags;
1449
1450        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1451        if (global_trace.stop_count++)
1452                goto out;
1453
1454        /* Prevent the buffers from switching */
1455        arch_spin_lock(&global_trace.max_lock);
1456
1457        buffer = global_trace.trace_buffer.buffer;
1458        if (buffer)
1459                ring_buffer_record_disable(buffer);
1460
1461#ifdef CONFIG_TRACER_MAX_TRACE
1462        buffer = global_trace.max_buffer.buffer;
1463        if (buffer)
1464                ring_buffer_record_disable(buffer);
1465#endif
1466
1467        arch_spin_unlock(&global_trace.max_lock);
1468
1469 out:
1470        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1471}
1472
1473static void tracing_stop_tr(struct trace_array *tr)
1474{
1475        struct ring_buffer *buffer;
1476        unsigned long flags;
1477
1478        /* If global, we need to also stop the max tracer */
1479        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1480                return tracing_stop();
1481
1482        raw_spin_lock_irqsave(&tr->start_lock, flags);
1483        if (tr->stop_count++)
1484                goto out;
1485
1486        buffer = tr->trace_buffer.buffer;
1487        if (buffer)
1488                ring_buffer_record_disable(buffer);
1489
1490 out:
1491        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1492}
1493
1494void trace_stop_cmdline_recording(void);
1495
1496static int trace_save_cmdline(struct task_struct *tsk)
1497{
1498        unsigned pid, idx;
1499
1500        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1501                return 0;
1502
1503        /*
1504         * It's not the end of the world if we don't get
1505         * the lock, but we also don't want to spin
1506         * nor do we want to disable interrupts,
1507         * so if we miss here, then better luck next time.
1508         */
1509        if (!arch_spin_trylock(&trace_cmdline_lock))
1510                return 0;
1511
1512        idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1513        if (idx == NO_CMDLINE_MAP) {
1514                idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1515
1516                /*
1517                 * Check whether the cmdline buffer at idx has a pid
1518                 * mapped. We are going to overwrite that entry so we
1519                 * need to clear the map_pid_to_cmdline. Otherwise we
1520                 * would read the new comm for the old pid.
1521                 */
1522                pid = savedcmd->map_cmdline_to_pid[idx];
1523                if (pid != NO_CMDLINE_MAP)
1524                        savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1525
1526                savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1527                savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1528
1529                savedcmd->cmdline_idx = idx;
1530        }
1531
1532        set_cmdline(idx, tsk->comm);
1533
1534        arch_spin_unlock(&trace_cmdline_lock);
1535
1536        return 1;
1537}
1538
1539static void __trace_find_cmdline(int pid, char comm[])
1540{
1541        unsigned map;
1542
1543        if (!pid) {
1544                strcpy(comm, "<idle>");
1545                return;
1546        }
1547
1548        if (WARN_ON_ONCE(pid < 0)) {
1549                strcpy(comm, "<XXX>");
1550                return;
1551        }
1552
1553        if (pid > PID_MAX_DEFAULT) {
1554                strcpy(comm, "<...>");
1555                return;
1556        }
1557
1558        map = savedcmd->map_pid_to_cmdline[pid];
1559        if (map != NO_CMDLINE_MAP)
1560                strcpy(comm, get_saved_cmdlines(map));
1561        else
1562                strcpy(comm, "<...>");
1563}
1564
1565void trace_find_cmdline(int pid, char comm[])
1566{
1567        preempt_disable();
1568        arch_spin_lock(&trace_cmdline_lock);
1569
1570        __trace_find_cmdline(pid, comm);
1571
1572        arch_spin_unlock(&trace_cmdline_lock);
1573        preempt_enable();
1574}
1575
1576void tracing_record_cmdline(struct task_struct *tsk)
1577{
1578        if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1579                return;
1580
1581        if (!__this_cpu_read(trace_cmdline_save))
1582                return;
1583
1584        if (trace_save_cmdline(tsk))
1585                __this_cpu_write(trace_cmdline_save, false);
1586}
1587
1588void
1589tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1590                             int pc)
1591{
1592        struct task_struct *tsk = current;
1593
1594        entry->preempt_count            = pc & 0xff;
1595        entry->pid                      = (tsk) ? tsk->pid : 0;
1596        entry->flags =
1597#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1598                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1599#else
1600                TRACE_FLAG_IRQS_NOSUPPORT |
1601#endif
1602                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1603                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1604                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1605                (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1606}
1607EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1608
1609struct ring_buffer_event *
1610trace_buffer_lock_reserve(struct ring_buffer *buffer,
1611                          int type,
1612                          unsigned long len,
1613                          unsigned long flags, int pc)
1614{
1615        struct ring_buffer_event *event;
1616
1617        event = ring_buffer_lock_reserve(buffer, len);
1618        if (event != NULL) {
1619                struct trace_entry *ent = ring_buffer_event_data(event);
1620
1621                tracing_generic_entry_update(ent, flags, pc);
1622                ent->type = type;
1623        }
1624
1625        return event;
1626}
1627
1628void
1629__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1630{
1631        __this_cpu_write(trace_cmdline_save, true);
1632        ring_buffer_unlock_commit(buffer, event);
1633}
1634
1635static inline void
1636__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1637                             struct ring_buffer_event *event,
1638                             unsigned long flags, int pc)
1639{
1640        __buffer_unlock_commit(buffer, event);
1641
1642        ftrace_trace_stack(buffer, flags, 6, pc);
1643        ftrace_trace_userstack(buffer, flags, pc);
1644}
1645
1646void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1647                                struct ring_buffer_event *event,
1648                                unsigned long flags, int pc)
1649{
1650        __trace_buffer_unlock_commit(buffer, event, flags, pc);
1651}
1652EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1653
1654static struct ring_buffer *temp_buffer;
1655
1656struct ring_buffer_event *
1657trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1658                          struct ftrace_event_file *ftrace_file,
1659                          int type, unsigned long len,
1660                          unsigned long flags, int pc)
1661{
1662        struct ring_buffer_event *entry;
1663
1664        *current_rb = ftrace_file->tr->trace_buffer.buffer;
1665        entry = trace_buffer_lock_reserve(*current_rb,
1666                                         type, len, flags, pc);
1667        /*
 1668         * If tracing is off, but we have triggers enabled,
 1669         * we still need to look at the event data. Use the temp_buffer
 1670         * to store the trace event for the trigger to use. It's recursion
 1671         * safe and will not be recorded anywhere.
1672         */
1673        if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1674                *current_rb = temp_buffer;
1675                entry = trace_buffer_lock_reserve(*current_rb,
1676                                                  type, len, flags, pc);
1677        }
1678        return entry;
1679}
1680EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1681
1682struct ring_buffer_event *
1683trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1684                                  int type, unsigned long len,
1685                                  unsigned long flags, int pc)
1686{
1687        *current_rb = global_trace.trace_buffer.buffer;
1688        return trace_buffer_lock_reserve(*current_rb,
1689                                         type, len, flags, pc);
1690}
1691EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1692
1693void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1694                                        struct ring_buffer_event *event,
1695                                        unsigned long flags, int pc)
1696{
1697        __trace_buffer_unlock_commit(buffer, event, flags, pc);
1698}
1699EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
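
/*
 * Rough illustration only (TRACE_FOO and struct foo_entry are made-up
 * names, not types from this file): callers of the exported helpers
 * above follow a reserve / fill / commit pattern. A NULL reserve means
 * the ring buffer is full or recording is off, and the event is dropped:
 *
 *        struct ring_buffer *buffer;
 *        struct ring_buffer_event *event;
 *        struct foo_entry *entry;
 *
 *        event = trace_current_buffer_lock_reserve(&buffer, TRACE_FOO,
 *                                                  sizeof(*entry), flags, pc);
 *        if (!event)
 *                return;
 *        entry = ring_buffer_event_data(event);
 *        entry->val = val;
 *        trace_current_buffer_unlock_commit(buffer, event, flags, pc);
 */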
1700
1701void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1702                                     struct ring_buffer_event *event,
1703                                     unsigned long flags, int pc,
1704                                     struct pt_regs *regs)
1705{
1706        __buffer_unlock_commit(buffer, event);
1707
1708        ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1709        ftrace_trace_userstack(buffer, flags, pc);
1710}
1711EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1712
1713void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1714                                         struct ring_buffer_event *event)
1715{
1716        ring_buffer_discard_commit(buffer, event);
1717}
1718EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1719
1720void
1721trace_function(struct trace_array *tr,
1722               unsigned long ip, unsigned long parent_ip, unsigned long flags,
1723               int pc)
1724{
1725        struct ftrace_event_call *call = &event_function;
1726        struct ring_buffer *buffer = tr->trace_buffer.buffer;
1727        struct ring_buffer_event *event;
1728        struct ftrace_entry *entry;
1729
1730        /* If we are reading the ring buffer, don't trace */
1731        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1732                return;
1733
1734        event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1735                                          flags, pc);
1736        if (!event)
1737                return;
1738        entry   = ring_buffer_event_data(event);
1739        entry->ip                       = ip;
1740        entry->parent_ip                = parent_ip;
1741
1742        if (!call_filter_check_discard(call, entry, buffer, event))
1743                __buffer_unlock_commit(buffer, event);
1744}
1745
1746#ifdef CONFIG_STACKTRACE
1747
1748#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1749struct ftrace_stack {
1750        unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
1751};
1752
1753static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1754static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1755
1756static void __ftrace_trace_stack(struct ring_buffer *buffer,
1757                                 unsigned long flags,
1758                                 int skip, int pc, struct pt_regs *regs)
1759{
1760        struct ftrace_event_call *call = &event_kernel_stack;
1761        struct ring_buffer_event *event;
1762        struct stack_entry *entry;
1763        struct stack_trace trace;
1764        int use_stack;
1765        int size = FTRACE_STACK_ENTRIES;
1766
1767        trace.nr_entries        = 0;
1768        trace.skip              = skip;
1769
1770        /*
1771         * Since events can happen in NMIs, there's no safe way to
1772         * use the per cpu ftrace_stacks. We reserve it, and if an interrupt
1773         * or NMI comes in, it will just have to use the smaller
1774         * FTRACE_STACK_ENTRIES sized storage in the entry itself.
1775         */
1776        preempt_disable_notrace();
1777
1778        use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1779        /*
1780         * We don't need any atomic variables, just a barrier.
1781         * If an interrupt comes in, we don't care, because it would
1782         * have exited and put the counter back to what we want.
1783         * We just need a barrier to keep gcc from moving things
1784         * around.
1785         */
1786        barrier();
1787        if (use_stack == 1) {
1788                trace.entries           = this_cpu_ptr(ftrace_stack.calls);
1789                trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
1790
1791                if (regs)
1792                        save_stack_trace_regs(regs, &trace);
1793                else
1794                        save_stack_trace(&trace);
1795
1796                if (trace.nr_entries > size)
1797                        size = trace.nr_entries;
1798        } else
1799                /* From now on, use_stack is a boolean */
1800                use_stack = 0;
1801
1802        size *= sizeof(unsigned long);
1803
1804        event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1805                                          sizeof(*entry) + size, flags, pc);
1806        if (!event)
1807                goto out;
1808        entry = ring_buffer_event_data(event);
1809
1810        memset(&entry->caller, 0, size);
1811
1812        if (use_stack)
1813                memcpy(&entry->caller, trace.entries,
1814                       trace.nr_entries * sizeof(unsigned long));
1815        else {
1816                trace.max_entries       = FTRACE_STACK_ENTRIES;
1817                trace.entries           = entry->caller;
1818                if (regs)
1819                        save_stack_trace_regs(regs, &trace);
1820                else
1821                        save_stack_trace(&trace);
1822        }
1823
1824        entry->size = trace.nr_entries;
1825
1826        if (!call_filter_check_discard(call, entry, buffer, event))
1827                __buffer_unlock_commit(buffer, event);
1828
1829 out:
1830        /* Again, don't let gcc optimize things here */
1831        barrier();
1832        __this_cpu_dec(ftrace_stack_reserve);
1833        preempt_enable_notrace();
1834
1835}
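
/*
 * Example of the fallback above: if an NMI fires while this cpu is
 * already in the middle of saving a stack trace (the reserve counter
 * comes back greater than one), the nested call skips the large per-cpu
 * ftrace_stack and saves at most FTRACE_STACK_ENTRIES entries directly
 * into the ring buffer entry instead.
 */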
1836
1837void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1838                             int skip, int pc, struct pt_regs *regs)
1839{
1840        if (!(trace_flags & TRACE_ITER_STACKTRACE))
1841                return;
1842
1843        __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1844}
1845
1846void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1847                        int skip, int pc)
1848{
1849        if (!(trace_flags & TRACE_ITER_STACKTRACE))
1850                return;
1851
1852        __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1853}
1854
1855void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1856                   int pc)
1857{
1858        __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1859}
1860
1861/**
1862 * trace_dump_stack - record a stack back trace in the trace buffer
1863 * @skip: Number of functions to skip (helper handlers)
1864 */
1865void trace_dump_stack(int skip)
1866{
1867        unsigned long flags;
1868
1869        if (tracing_disabled || tracing_selftest_running)
1870                return;
1871
1872        local_save_flags(flags);
1873
1874        /*
1875         * Skip 3 more, which seems to get us to the caller of
1876         * this function.
1877         */
1878        skip += 3;
1879        __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1880                             flags, skip, preempt_count(), NULL);
1881}
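
/*
 * For example, code debugging an unexpected path can call
 * trace_dump_stack(0) at the suspect site; the backtrace is recorded in
 * the ring buffer and shows up in the "trace" file instead of being
 * printed to the console.
 */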
1882
1883static DEFINE_PER_CPU(int, user_stack_count);
1884
1885void
1886ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1887{
1888        struct ftrace_event_call *call = &event_user_stack;
1889        struct ring_buffer_event *event;
1890        struct userstack_entry *entry;
1891        struct stack_trace trace;
1892
1893        if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1894                return;
1895
1896        /*
1897         * NMIs can not handle page faults, even with fixups.
1898         * Saving the user stack can (and often does) fault.
1899         */
1900        if (unlikely(in_nmi()))
1901                return;
1902
1903        /*
1904         * prevent recursion, since the user stack tracing may
1905         * trigger other kernel events.
1906         */
1907        preempt_disable();
1908        if (__this_cpu_read(user_stack_count))
1909                goto out;
1910
1911        __this_cpu_inc(user_stack_count);
1912
1913        event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1914                                          sizeof(*entry), flags, pc);
1915        if (!event)
1916                goto out_drop_count;
1917        entry   = ring_buffer_event_data(event);
1918
1919        entry->tgid             = current->tgid;
1920        memset(&entry->caller, 0, sizeof(entry->caller));
1921
1922        trace.nr_entries        = 0;
1923        trace.max_entries       = FTRACE_STACK_ENTRIES;
1924        trace.skip              = 0;
1925        trace.entries           = entry->caller;
1926
1927        save_stack_trace_user(&trace);
1928        if (!call_filter_check_discard(call, entry, buffer, event))
1929                __buffer_unlock_commit(buffer, event);
1930
1931 out_drop_count:
1932        __this_cpu_dec(user_stack_count);
1933 out:
1934        preempt_enable();
1935}
1936
1937#ifdef UNUSED
1938static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1939{
1940        ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
1941}
1942#endif /* UNUSED */
1943
1944#endif /* CONFIG_STACKTRACE */
1945
1946/* created for use with alloc_percpu */
1947struct trace_buffer_struct {
1948        char buffer[TRACE_BUF_SIZE];
1949};
1950
1951static struct trace_buffer_struct *trace_percpu_buffer;
1952static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1953static struct trace_buffer_struct *trace_percpu_irq_buffer;
1954static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1955
1956/*
1957 * The buffer used depends on the context. There is a per cpu
1958 * buffer for normal context, softirq context, hard irq context and
1959 * for NMI context. This allows for lockless recording.
1960 *
1961 * Note, if the buffers failed to be allocated, then this returns NULL
1962 */
1963static char *get_trace_buf(void)
1964{
1965        struct trace_buffer_struct *percpu_buffer;
1966
1967        /*
1968         * If we have allocated per cpu buffers, then we do not
1969         * need to do any locking.
1970         */
1971        if (in_nmi())
1972                percpu_buffer = trace_percpu_nmi_buffer;
1973        else if (in_irq())
1974                percpu_buffer = trace_percpu_irq_buffer;
1975        else if (in_softirq())
1976                percpu_buffer = trace_percpu_sirq_buffer;
1977        else
1978                percpu_buffer = trace_percpu_buffer;
1979
1980        if (!percpu_buffer)
1981                return NULL;
1982
1983        return this_cpu_ptr(&percpu_buffer->buffer[0]);
1984}
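
/*
 * Example of why no locking is needed above: a task level trace_printk()
 * formats into trace_percpu_buffer; if an interrupt arrives in the
 * middle and also calls trace_printk(), the nested call sees in_irq()
 * and uses trace_percpu_irq_buffer, so the two never share a scratch
 * buffer on the same cpu (callers keep preemption disabled while the
 * buffer is in use).
 */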
1985
1986static int alloc_percpu_trace_buffer(void)
1987{
1988        struct trace_buffer_struct *buffers;
1989        struct trace_buffer_struct *sirq_buffers;
1990        struct trace_buffer_struct *irq_buffers;
1991        struct trace_buffer_struct *nmi_buffers;
1992
1993        buffers = alloc_percpu(struct trace_buffer_struct);
1994        if (!buffers)
1995                goto err_warn;
1996
1997        sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1998        if (!sirq_buffers)
1999                goto err_sirq;
2000
2001        irq_buffers = alloc_percpu(struct trace_buffer_struct);
2002        if (!irq_buffers)
2003                goto err_irq;
2004
2005        nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2006        if (!nmi_buffers)
2007                goto err_nmi;
2008
2009        trace_percpu_buffer = buffers;
2010        trace_percpu_sirq_buffer = sirq_buffers;
2011        trace_percpu_irq_buffer = irq_buffers;
2012        trace_percpu_nmi_buffer = nmi_buffers;
2013
2014        return 0;
2015
2016 err_nmi:
2017        free_percpu(irq_buffers);
2018 err_irq:
2019        free_percpu(sirq_buffers);
2020 err_sirq:
2021        free_percpu(buffers);
2022 err_warn:
2023        WARN(1, "Could not allocate percpu trace_printk buffer");
2024        return -ENOMEM;
2025}
2026
2027static int buffers_allocated;
2028
2029void trace_printk_init_buffers(void)
2030{
2031        if (buffers_allocated)
2032                return;
2033
2034        if (alloc_percpu_trace_buffer())
2035                return;
2036
2037        /* trace_printk() is for debug use only. Don't use it in production. */
2038
2039        pr_warning("\n**********************************************************\n");
2040        pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2041        pr_warning("**                                                      **\n");
2042        pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
2043        pr_warning("**                                                      **\n");
2044        pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
2045        pr_warning("** unsafe for production use.                           **\n");
2046        pr_warning("**                                                      **\n");
2047        pr_warning("** If you see this message and you are not debugging    **\n");
2048        pr_warning("** the kernel, report this immediately to your vendor!  **\n");
2049        pr_warning("**                                                      **\n");
2050        pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2051        pr_warning("**********************************************************\n");
2052
2053        /* Expand the buffers to set size */
2054        tracing_update_buffers();
2055
2056        buffers_allocated = 1;
2057
2058        /*
2059         * trace_printk_init_buffers() can be called by modules.
2060         * If that happens, then we need to start cmdline recording
2061         * directly here. If the global_trace.trace_buffer.buffer is
2062         * already allocated, then this was called by module code.
2063         */
2064        if (global_trace.trace_buffer.buffer)
2065                tracing_start_cmdline_record();
2066}
2067
2068void trace_printk_start_comm(void)
2069{
2070        /* Start tracing comms if trace printk is set */
2071        if (!buffers_allocated)
2072                return;
2073        tracing_start_cmdline_record();
2074}
2075
2076static void trace_printk_start_stop_comm(int enabled)
2077{
2078        if (!buffers_allocated)
2079                return;
2080
2081        if (enabled)
2082                tracing_start_cmdline_record();
2083        else
2084                tracing_stop_cmdline_record();
2085}
2086
2087/**
2088 * trace_vbprintk - write binary msg to tracing buffer
2089 * @ip: caller address; @fmt: printf format string; @args: arguments for @fmt
2090 */
2091int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2092{
2093        struct ftrace_event_call *call = &event_bprint;
2094        struct ring_buffer_event *event;
2095        struct ring_buffer *buffer;
2096        struct trace_array *tr = &global_trace;
2097        struct bprint_entry *entry;
2098        unsigned long flags;
2099        char *tbuffer;
2100        int len = 0, size, pc;
2101
2102        if (unlikely(tracing_selftest_running || tracing_disabled))
2103                return 0;
2104
2105        /* Don't pollute graph traces with trace_vprintk internals */
2106        pause_graph_tracing();
2107
2108        pc = preempt_count();
2109        preempt_disable_notrace();
2110
2111        tbuffer = get_trace_buf();
2112        if (!tbuffer) {
2113                len = 0;
2114                goto out;
2115        }
2116
2117        len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2118
2119        if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2120                goto out;
2121
2122        local_save_flags(flags);
2123        size = sizeof(*entry) + sizeof(u32) * len;
2124        buffer = tr->trace_buffer.buffer;
2125        event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2126                                          flags, pc);
2127        if (!event)
2128                goto out;
2129        entry = ring_buffer_event_data(event);
2130        entry->ip                       = ip;
2131        entry->fmt                      = fmt;
2132
2133        memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2134        if (!call_filter_check_discard(call, entry, buffer, event)) {
2135                __buffer_unlock_commit(buffer, event);
2136                ftrace_trace_stack(buffer, flags, 6, pc);
2137        }
2138
2139out:
2140        preempt_enable_notrace();
2141        unpause_graph_tracing();
2142
2143        return len;
2144}
2145EXPORT_SYMBOL_GPL(trace_vbprintk);
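
/*
 * Callers normally reach this through the trace_printk() macro, for
 * example (debug use only, as the banner printed by
 * trace_printk_init_buffers() warns):
 *
 *        trace_printk("entered %s, count=%d\n", name, count);
 *
 * With a constant format string trace_printk() takes this binary
 * (bprint) path: only the arguments are copied into the ring buffer
 * and the format pointer is resolved later, when the trace is read.
 */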
2146
2147static int
2148__trace_array_vprintk(struct ring_buffer *buffer,
2149                      unsigned long ip, const char *fmt, va_list args)
2150{
2151        struct ftrace_event_call *call = &event_print;
2152        struct ring_buffer_event *event;
2153        int len = 0, size, pc;
2154        struct print_entry *entry;
2155        unsigned long flags;
2156        char *tbuffer;
2157
2158        if (tracing_disabled || tracing_selftest_running)
2159                return 0;
2160
2161        /* Don't pollute graph traces with trace_vprintk internals */
2162        pause_graph_tracing();
2163
2164        pc = preempt_count();
2165        preempt_disable_notrace();
2166
2167
2168        tbuffer = get_trace_buf();
2169        if (!tbuffer) {
2170                len = 0;
2171                goto out;
2172        }
2173
2174        len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2175
2176        local_save_flags(flags);
2177        size = sizeof(*entry) + len + 1;
2178        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2179                                          flags, pc);
2180        if (!event)
2181                goto out;
2182        entry = ring_buffer_event_data(event);
2183        entry->ip = ip;
2184
2185        memcpy(&entry->buf, tbuffer, len + 1);
2186        if (!call_filter_check_discard(call, entry, buffer, event)) {
2187                __buffer_unlock_commit(buffer, event);
2188                ftrace_trace_stack(buffer, flags, 6, pc);
2189        }
2190 out:
2191        preempt_enable_notrace();
2192        unpause_graph_tracing();
2193
2194        return len;
2195}
2196
2197int trace_array_vprintk(struct trace_array *tr,
2198                        unsigned long ip, const char *fmt, va_list args)
2199{
2200        return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2201}
2202
2203int trace_array_printk(struct trace_array *tr,
2204                       unsigned long ip, const char *fmt, ...)
2205{
2206        int ret;
2207        va_list ap;
2208
2209        if (!(trace_flags & TRACE_ITER_PRINTK))
2210                return 0;
2211
2212        va_start(ap, fmt);
2213        ret = trace_array_vprintk(tr, ip, fmt, ap);
2214        va_end(ap);
2215        return ret;
2216}
2217
2218int trace_array_printk_buf(struct ring_buffer *buffer,
2219                           unsigned long ip, const char *fmt, ...)
2220{
2221        int ret;
2222        va_list ap;
2223
2224        if (!(trace_flags & TRACE_ITER_PRINTK))
2225                return 0;
2226
2227        va_start(ap, fmt);
2228        ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2229        va_end(ap);
2230        return ret;
2231}
2232
2233int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2234{
2235        return trace_array_vprintk(&global_trace, ip, fmt, args);
2236}
2237EXPORT_SYMBOL_GPL(trace_vprintk);
2238
2239static void trace_iterator_increment(struct trace_iterator *iter)
2240{
2241        struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2242
2243        iter->idx++;
2244        if (buf_iter)
2245                ring_buffer_read(buf_iter, NULL);
2246}
2247
2248static struct trace_entry *
2249peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2250                unsigned long *lost_events)
2251{
2252        struct ring_buffer_event *event;
2253        struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2254
2255        if (buf_iter)
2256                event = ring_buffer_iter_peek(buf_iter, ts);
2257        else
2258                event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2259                                         lost_events);
2260
2261        if (event) {
2262                iter->ent_size = ring_buffer_event_length(event);
2263                return ring_buffer_event_data(event);
2264        }
2265        iter->ent_size = 0;
2266        return NULL;
2267}
2268
2269static struct trace_entry *
2270__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2271                  unsigned long *missing_events, u64 *ent_ts)
2272{
2273        struct ring_buffer *buffer = iter->trace_buffer->buffer;
2274        struct trace_entry *ent, *next = NULL;
2275        unsigned long lost_events = 0, next_lost = 0;
2276        int cpu_file = iter->cpu_file;
2277        u64 next_ts = 0, ts;
2278        int next_cpu = -1;
2279        int next_size = 0;
2280        int cpu;
2281
2282        /*
2283         * If we are in a per_cpu trace file, don't bother iterating over
2284         * all cpus; just peek at that cpu directly.
2285         */
2286        if (cpu_file > RING_BUFFER_ALL_CPUS) {
2287                if (ring_buffer_empty_cpu(buffer, cpu_file))
2288                        return NULL;
2289                ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2290                if (ent_cpu)
2291                        *ent_cpu = cpu_file;
2292
2293                return ent;
2294        }
2295
2296        for_each_tracing_cpu(cpu) {
2297
2298                if (ring_buffer_empty_cpu(buffer, cpu))
2299                        continue;
2300
2301                ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2302
2303                /*
2304                 * Pick the entry with the smallest timestamp:
2305                 */
2306                if (ent && (!next || ts < next_ts)) {
2307                        next = ent;
2308                        next_cpu = cpu;
2309                        next_ts = ts;
2310                        next_lost = lost_events;
2311                        next_size = iter->ent_size;
2312                }
2313        }
2314
2315        iter->ent_size = next_size;
2316
2317        if (ent_cpu)
2318                *ent_cpu = next_cpu;
2319
2320        if (ent_ts)
2321                *ent_ts = next_ts;
2322
2323        if (missing_events)
2324                *missing_events = next_lost;
2325
2326        return next;
2327}
2328
2329/* Find the next real entry, without updating the iterator itself */
2330struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2331                                          int *ent_cpu, u64 *ent_ts)
2332{
2333        return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2334}
2335
2336/* Find the next real entry, and increment the iterator to the next entry */
2337void *trace_find_next_entry_inc(struct trace_iterator *iter)
2338{
2339        iter->ent = __find_next_entry(iter, &iter->cpu,
2340                                      &iter->lost_events, &iter->ts);
2341
2342        if (iter->ent)
2343                trace_iterator_increment(iter);
2344
2345        return iter->ent ? iter : NULL;
2346}
2347
2348static void trace_consume(struct trace_iterator *iter)
2349{
2350        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2351                            &iter->lost_events);
2352}
2353
2354static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2355{
2356        struct trace_iterator *iter = m->private;
2357        int i = (int)*pos;
2358        void *ent;
2359
2360        WARN_ON_ONCE(iter->leftover);
2361
2362        (*pos)++;
2363
2364        /* can't go backwards */
2365        if (iter->idx > i)
2366                return NULL;
2367
2368        if (iter->idx < 0)
2369                ent = trace_find_next_entry_inc(iter);
2370        else
2371                ent = iter;
2372
2373        while (ent && iter->idx < i)
2374                ent = trace_find_next_entry_inc(iter);
2375
2376        iter->pos = *pos;
2377
2378        return ent;
2379}
2380
2381void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2382{
2383        struct ring_buffer_event *event;
2384        struct ring_buffer_iter *buf_iter;
2385        unsigned long entries = 0;
2386        u64 ts;
2387
2388        per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2389
2390        buf_iter = trace_buffer_iter(iter, cpu);
2391        if (!buf_iter)
2392                return;
2393
2394        ring_buffer_iter_reset(buf_iter);
2395
2396        /*
2397         * With the max latency tracers, it is possible that a reset
2398         * never took place on a cpu. This shows up as timestamps that
2399         * are before the start of the buffer.
2400         */
2401        while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2402                if (ts >= iter->trace_buffer->time_start)
2403                        break;
2404                entries++;
2405                ring_buffer_read(buf_iter, NULL);
2406        }
2407
2408        per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2409}
2410
2411/*
2412 * The current tracer is copied to avoid holding a global lock
2413 * all around.
2414 */
2415static void *s_start(struct seq_file *m, loff_t *pos)
2416{
2417        struct trace_iterator *iter = m->private;
2418        struct trace_array *tr = iter->tr;
2419        int cpu_file = iter->cpu_file;
2420        void *p = NULL;
2421        loff_t l = 0;
2422        int cpu;
2423
2424        /*
2425         * Copy the tracer to avoid using a global lock all around.
2426         * iter->trace is a copy of current_trace; the name pointer may
2427         * be compared instead of using strcmp(), as iter->trace->name
2428         * will point to the same string as current_trace->name.
2429         */
2430        mutex_lock(&trace_types_lock);
2431        if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2432                *iter->trace = *tr->current_trace;
2433        mutex_unlock(&trace_types_lock);
2434
2435#ifdef CONFIG_TRACER_MAX_TRACE
2436        if (iter->snapshot && iter->trace->use_max_tr)
2437                return ERR_PTR(-EBUSY);
2438#endif
2439
2440        if (!iter->snapshot)
2441                atomic_inc(&trace_record_cmdline_disabled);
2442
2443        if (*pos != iter->pos) {
2444                iter->ent = NULL;
2445                iter->cpu = 0;
2446                iter->idx = -1;
2447
2448                if (cpu_file == RING_BUFFER_ALL_CPUS) {
2449                        for_each_tracing_cpu(cpu)
2450                                tracing_iter_reset(iter, cpu);
2451                } else
2452                        tracing_iter_reset(iter, cpu_file);
2453
2454                iter->leftover = 0;
2455                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2456                        ;
2457
2458        } else {
2459                /*
2460                 * If we overflowed the seq_file before, then we want
2461                 * to just reuse the trace_seq buffer again.
2462                 */
2463                if (iter->leftover)
2464                        p = iter;
2465                else {
2466                        l = *pos - 1;
2467                        p = s_next(m, p, &l);
2468                }
2469        }
2470
2471        trace_event_read_lock();
2472        trace_access_lock(cpu_file);
2473        return p;
2474}
2475
2476static void s_stop(struct seq_file *m, void *p)
2477{
2478        struct trace_iterator *iter = m->private;
2479
2480#ifdef CONFIG_TRACER_MAX_TRACE
2481        if (iter->snapshot && iter->trace->use_max_tr)
2482                return;
2483#endif
2484
2485        if (!iter->snapshot)
2486                atomic_dec(&trace_record_cmdline_disabled);
2487
2488        trace_access_unlock(iter->cpu_file);
2489        trace_event_read_unlock();
2490}
2491
2492static void
2493get_total_entries(struct trace_buffer *buf,
2494                  unsigned long *total, unsigned long *entries)
2495{
2496        unsigned long count;
2497        int cpu;
2498
2499        *total = 0;
2500        *entries = 0;
2501
2502        for_each_tracing_cpu(cpu) {
2503                count = ring_buffer_entries_cpu(buf->buffer, cpu);
2504                /*
2505                 * If this buffer has skipped entries, then we hold all
2506                 * entries for the trace and we need to ignore the
2507                 * ones before the time stamp.
2508                 */
2509                if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2510                        count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2511                        /* total is the same as the entries */
2512                        *total += count;
2513                } else
2514                        *total += count +
2515                                ring_buffer_overrun_cpu(buf->buffer, cpu);
2516                *entries += count;
2517        }
2518}
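
/*
 * For instance, if a cpu buffer currently holds 1000 events and 200
 * older events were overwritten (overrun), that cpu contributes 1000 to
 * *entries and 1200 to *total; on a single-cpu trace the header below
 * would then read "entries-in-buffer/entries-written: 1000/1200".
 */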
2519
2520static void print_lat_help_header(struct seq_file *m)
2521{
2522        seq_puts(m, "#                  _------=> CPU#            \n"
2523                    "#                 / _-----=> irqs-off        \n"
2524                    "#                | / _----=> need-resched    \n"
2525                    "#                || / _---=> hardirq/softirq \n"
2526                    "#                ||| / _--=> preempt-depth   \n"
2527                    "#                |||| /     delay            \n"
2528                    "#  cmd     pid   ||||| time  |   caller      \n"
2529                    "#     \\   /      |||||  \\    |   /         \n");
2530}
2531
2532static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2533{
2534        unsigned long total;
2535        unsigned long entries;
2536
2537        get_total_entries(buf, &total, &entries);
2538        seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2539                   entries, total, num_online_cpus());
2540        seq_puts(m, "#\n");
2541}
2542
2543static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2544{
2545        print_event_info(buf, m);
2546        seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
2547                    "#              | |       |          |         |\n");
2548}
2549
2550static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2551{
2552        print_event_info(buf, m);
2553        seq_puts(m, "#                              _-----=> irqs-off\n"
2554                    "#                             / _----=> need-resched\n"
2555                    "#                            | / _---=> hardirq/softirq\n"
2556                    "#                            || / _--=> preempt-depth\n"
2557                    "#                            ||| /     delay\n"
2558                    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
2559                    "#              | |       |   ||||       |         |\n");
2560}
2561
2562void
2563print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2564{
2565        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2566        struct trace_buffer *buf = iter->trace_buffer;
2567        struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2568        struct tracer *type = iter->trace;
2569        unsigned long entries;
2570        unsigned long total;
2571        const char *name = "preemption";
2572
2573        name = type->name;
2574
2575        get_total_entries(buf, &total, &entries);
2576
2577        seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2578                   name, UTS_RELEASE);
2579        seq_puts(m, "# -----------------------------------"
2580                 "---------------------------------\n");
2581        seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2582                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2583                   nsecs_to_usecs(data->saved_latency),
2584                   entries,
2585                   total,
2586                   buf->cpu,
2587#if defined(CONFIG_PREEMPT_NONE)
2588                   "server",
2589#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2590                   "desktop",
2591#elif defined(CONFIG_PREEMPT)
2592                   "preempt",
2593#else
2594                   "unknown",
2595#endif
2596                   /* These are reserved for later use */
2597                   0, 0, 0, 0);
2598#ifdef CONFIG_SMP
2599        seq_printf(m, " #P:%d)\n", num_online_cpus());
2600#else
2601        seq_puts(m, ")\n");
2602#endif
2603        seq_puts(m, "#    -----------------\n");
2604        seq_printf(m, "#    | task: %.16s-%d "
2605                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2606                   data->comm, data->pid,
2607                   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2608                   data->policy, data->rt_priority);
2609        seq_puts(m, "#    -----------------\n");
2610
2611        if (data->critical_start) {
2612                seq_puts(m, "#  => started at: ");
2613                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2614                trace_print_seq(m, &iter->seq);
2615                seq_puts(m, "\n#  => ended at:   ");
2616                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2617                trace_print_seq(m, &iter->seq);
2618                seq_puts(m, "\n#\n");
2619        }
2620
2621        seq_puts(m, "#\n");
2622}
2623
2624static void test_cpu_buff_start(struct trace_iterator *iter)
2625{
2626        struct trace_seq *s = &iter->seq;
2627
2628        if (!(trace_flags & TRACE_ITER_ANNOTATE))
2629                return;
2630
2631        if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2632                return;
2633
2634        if (cpumask_test_cpu(iter->cpu, iter->started))
2635                return;
2636
2637        if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2638                return;
2639
2640        cpumask_set_cpu(iter->cpu, iter->started);
2641
2642        /* Don't print started cpu buffer for the first entry of the trace */
2643        if (iter->idx > 1)
2644                trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2645                                iter->cpu);
2646}
2647
2648static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2649{
2650        struct trace_seq *s = &iter->seq;
2651        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2652        struct trace_entry *entry;
2653        struct trace_event *event;
2654
2655        entry = iter->ent;
2656
2657        test_cpu_buff_start(iter);
2658
2659        event = ftrace_find_event(entry->type);
2660
2661        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2662                if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2663                        trace_print_lat_context(iter);
2664                else
2665                        trace_print_context(iter);
2666        }
2667
2668        if (trace_seq_has_overflowed(s))
2669                return TRACE_TYPE_PARTIAL_LINE;
2670
2671        if (event)
2672                return event->funcs->trace(iter, sym_flags, event);
2673
2674        trace_seq_printf(s, "Unknown type %d\n", entry->type);
2675
2676        return trace_handle_return(s);
2677}
2678
2679static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2680{
2681        struct trace_seq *s = &iter->seq;
2682        struct trace_entry *entry;
2683        struct trace_event *event;
2684
2685        entry = iter->ent;
2686
2687        if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2688                trace_seq_printf(s, "%d %d %llu ",
2689                                 entry->pid, iter->cpu, iter->ts);
2690
2691        if (trace_seq_has_overflowed(s))
2692                return TRACE_TYPE_PARTIAL_LINE;
2693
2694        event = ftrace_find_event(entry->type);
2695        if (event)
2696                return event->funcs->raw(iter, 0, event);
2697
2698        trace_seq_printf(s, "%d ?\n", entry->type);
2699
2700        return trace_handle_return(s);
2701}
2702
2703static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2704{
2705        struct trace_seq *s = &iter->seq;
2706        unsigned char newline = '\n';
2707        struct trace_entry *entry;
2708        struct trace_event *event;
2709
2710        entry = iter->ent;
2711
2712        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2713                SEQ_PUT_HEX_FIELD(s, entry->pid);
2714                SEQ_PUT_HEX_FIELD(s, iter->cpu);
2715                SEQ_PUT_HEX_FIELD(s, iter->ts);
2716                if (trace_seq_has_overflowed(s))
2717                        return TRACE_TYPE_PARTIAL_LINE;
2718        }
2719
2720        event = ftrace_find_event(entry->type);
2721        if (event) {
2722                enum print_line_t ret = event->funcs->hex(iter, 0, event);
2723                if (ret != TRACE_TYPE_HANDLED)
2724                        return ret;
2725        }
2726
2727        SEQ_PUT_FIELD(s, newline);
2728
2729        return trace_handle_return(s);
2730}
2731
2732static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2733{
2734        struct trace_seq *s = &iter->seq;
2735        struct trace_entry *entry;
2736        struct trace_event *event;
2737
2738        entry = iter->ent;
2739
2740        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2741                SEQ_PUT_FIELD(s, entry->pid);
2742                SEQ_PUT_FIELD(s, iter->cpu);
2743                SEQ_PUT_FIELD(s, iter->ts);
2744                if (trace_seq_has_overflowed(s))
2745                        return TRACE_TYPE_PARTIAL_LINE;
2746        }
2747
2748        event = ftrace_find_event(entry->type);
2749        return event ? event->funcs->binary(iter, 0, event) :
2750                TRACE_TYPE_HANDLED;
2751}
2752
2753int trace_empty(struct trace_iterator *iter)
2754{
2755        struct ring_buffer_iter *buf_iter;
2756        int cpu;
2757
2758        /* If we are looking at one CPU buffer, only check that one */
2759        if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2760                cpu = iter->cpu_file;
2761                buf_iter = trace_buffer_iter(iter, cpu);
2762                if (buf_iter) {
2763                        if (!ring_buffer_iter_empty(buf_iter))
2764                                return 0;
2765                } else {
2766                        if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2767                                return 0;
2768                }
2769                return 1;
2770        }
2771
2772        for_each_tracing_cpu(cpu) {
2773                buf_iter = trace_buffer_iter(iter, cpu);
2774                if (buf_iter) {
2775                        if (!ring_buffer_iter_empty(buf_iter))
2776                                return 0;
2777                } else {
2778                        if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2779                                return 0;
2780                }
2781        }
2782
2783        return 1;
2784}
2785
2786/*  Called with trace_event_read_lock() held. */
2787enum print_line_t print_trace_line(struct trace_iterator *iter)
2788{
2789        enum print_line_t ret;
2790
2791        if (iter->lost_events) {
2792                trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2793                                 iter->cpu, iter->lost_events);
2794                if (trace_seq_has_overflowed(&iter->seq))
2795                        return TRACE_TYPE_PARTIAL_LINE;
2796        }
2797
2798        if (iter->trace && iter->trace->print_line) {
2799                ret = iter->trace->print_line(iter);
2800                if (ret != TRACE_TYPE_UNHANDLED)
2801                        return ret;
2802        }
2803
2804        if (iter->ent->type == TRACE_BPUTS &&
2805                        trace_flags & TRACE_ITER_PRINTK &&
2806                        trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2807                return trace_print_bputs_msg_only(iter);
2808
2809        if (iter->ent->type == TRACE_BPRINT &&
2810                        trace_flags & TRACE_ITER_PRINTK &&
2811                        trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2812                return trace_print_bprintk_msg_only(iter);
2813
2814        if (iter->ent->type == TRACE_PRINT &&
2815                        trace_flags & TRACE_ITER_PRINTK &&
2816                        trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2817                return trace_print_printk_msg_only(iter);
2818
2819        if (trace_flags & TRACE_ITER_BIN)
2820                return print_bin_fmt(iter);
2821
2822        if (trace_flags & TRACE_ITER_HEX)
2823                return print_hex_fmt(iter);
2824
2825        if (trace_flags & TRACE_ITER_RAW)
2826                return print_raw_fmt(iter);
2827
2828        return print_trace_fmt(iter);
2829}
2830
2831void trace_latency_header(struct seq_file *m)
2832{
2833        struct trace_iterator *iter = m->private;
2834
2835        /* print nothing if the buffers are empty */
2836        if (trace_empty(iter))
2837                return;
2838
2839        if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2840                print_trace_header(m, iter);
2841
2842        if (!(trace_flags & TRACE_ITER_VERBOSE))
2843                print_lat_help_header(m);
2844}
2845
2846void trace_default_header(struct seq_file *m)
2847{
2848        struct trace_iterator *iter = m->private;
2849
2850        if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2851                return;
2852
2853        if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2854                /* print nothing if the buffers are empty */
2855                if (trace_empty(iter))
2856                        return;
2857                print_trace_header(m, iter);
2858                if (!(trace_flags & TRACE_ITER_VERBOSE))
2859                        print_lat_help_header(m);
2860        } else {
2861                if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2862                        if (trace_flags & TRACE_ITER_IRQ_INFO)
2863                                print_func_help_header_irq(iter->trace_buffer, m);
2864                        else
2865                                print_func_help_header(iter->trace_buffer, m);
2866                }
2867        }
2868}
2869
2870static void test_ftrace_alive(struct seq_file *m)
2871{
2872        if (!ftrace_is_dead())
2873                return;
2874        seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2875                    "#          MAY BE MISSING FUNCTION EVENTS\n");
2876}
2877
2878#ifdef CONFIG_TRACER_MAX_TRACE
2879static void show_snapshot_main_help(struct seq_file *m)
2880{
2881        seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2882                    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2883                    "#                      Takes a snapshot of the main buffer.\n"
2884                    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2885                    "#                      (Doesn't have to be '2'; works with any number that\n"
2886                    "#                       is not a '0' or '1')\n");
2887}
2888
2889static void show_snapshot_percpu_help(struct seq_file *m)
2890{
2891        seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2892#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2893        seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2894                    "#                      Takes a snapshot of the main buffer for this cpu.\n");
2895#else
2896        seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2897                    "#                     Must use main snapshot file to allocate.\n");
2898#endif
2899        seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2900                    "#                      (Doesn't have to be '2'; works with any number that\n"
2901                    "#                       is not a '0' or '1')\n");
2902}
2903
2904static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2905{
2906        if (iter->tr->allocated_snapshot)
2907                seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2908        else
2909                seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2910
2911        seq_puts(m, "# Snapshot commands:\n");
2912        if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2913                show_snapshot_main_help(m);
2914        else
2915                show_snapshot_percpu_help(m);
2916}
2917#else
2918/* Should never be called */
2919static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2920#endif
2921
2922static int s_show(struct seq_file *m, void *v)
2923{
2924        struct trace_iterator *iter = v;
2925        int ret;
2926
2927        if (iter->ent == NULL) {
2928                if (iter->tr) {
2929                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
2930                        seq_puts(m, "#\n");
2931                        test_ftrace_alive(m);
2932                }
2933                if (iter->snapshot && trace_empty(iter))
2934                        print_snapshot_help(m, iter);
2935                else if (iter->trace && iter->trace->print_header)
2936                        iter->trace->print_header(m);
2937                else
2938                        trace_default_header(m);
2939
2940        } else if (iter->leftover) {
2941                /*
2942                 * If we filled the seq_file buffer earlier, we
2943                 * want to just show it now.
2944                 */
2945                ret = trace_print_seq(m, &iter->seq);
2946
2947                /* ret should this time be zero, but you never know */
2948                iter->leftover = ret;
2949
2950        } else {
2951                print_trace_line(iter);
2952                ret = trace_print_seq(m, &iter->seq);
2953                /*
2954                 * If we overflow the seq_file buffer, then it will
2955                 * ask us for this data again at start up.
2956                 * Use that instead.
2957                 *  ret is 0 if seq_file write succeeded.
2958                 *        -1 otherwise.
2959                 */
2960                iter->leftover = ret;
2961        }
2962
2963        return 0;
2964}
2965
2966/*
2967 * Should be used after trace_array_get(); trace_types_lock
2968 * ensures that i_cdev was already initialized.
2969 */
2970static inline int tracing_get_cpu(struct inode *inode)
2971{
2972        if (inode->i_cdev) /* See trace_create_cpu_file() */
2973                return (long)inode->i_cdev - 1;
2974        return RING_BUFFER_ALL_CPUS;
2975}
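
/*
 * The +1/-1 dance exists because trace_create_cpu_file() stores the cpu
 * number as (cpu + 1) in i_cdev, so that cpu 0 can be told apart from a
 * NULL i_cdev; files that are not per-cpu leave i_cdev NULL and map to
 * RING_BUFFER_ALL_CPUS here.
 */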
2976
2977static const struct seq_operations tracer_seq_ops = {
2978        .start          = s_start,
2979        .next           = s_next,
2980        .stop           = s_stop,
2981        .show           = s_show,
2982};
2983
2984static struct trace_iterator *
2985__tracing_open(struct inode *inode, struct file *file, bool snapshot)
2986{
2987        struct trace_array *tr = inode->i_private;
2988        struct trace_iterator *iter;
2989        int cpu;
2990
2991        if (tracing_disabled)
2992                return ERR_PTR(-ENODEV);
2993
2994        iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2995        if (!iter)
2996                return ERR_PTR(-ENOMEM);
2997
2998        iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2999                                    GFP_KERNEL);
3000        if (!iter->buffer_iter)
3001                goto release;
3002
3003        /*
3004         * We make a copy of the current tracer to avoid concurrent
3005         * changes to it while we are reading.
3006         */
3007        mutex_lock(&trace_types_lock);
3008        iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3009        if (!iter->trace)
3010                goto fail;
3011
3012        *iter->trace = *tr->current_trace;
3013
3014        if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3015                goto fail;
3016
3017        iter->tr = tr;
3018
3019#ifdef CONFIG_TRACER_MAX_TRACE
3020        /* Currently only the top directory has a snapshot */
3021        if (tr->current_trace->print_max || snapshot)
3022                iter->trace_buffer = &tr->max_buffer;
3023        else
3024#endif
3025                iter->trace_buffer = &tr->trace_buffer;
3026        iter->snapshot = snapshot;
3027        iter->pos = -1;
3028        iter->cpu_file = tracing_get_cpu(inode);
3029        mutex_init(&iter->mutex);
3030
3031        /* Notify the tracer early; before we stop tracing. */
3032        if (iter->trace && iter->trace->open)
3033                iter->trace->open(iter);
3034
3035        /* Annotate start of buffers if we had overruns */
3036        if (ring_buffer_overruns(iter->trace_buffer->buffer))
3037                iter->iter_flags |= TRACE_FILE_ANNOTATE;
3038
3039        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3040        if (trace_clocks[tr->clock_id].in_ns)
3041                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3042
3043        /* stop the trace while dumping if we are not opening "snapshot" */
3044        if (!iter->snapshot)
3045                tracing_stop_tr(tr);
3046
3047        if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3048                for_each_tracing_cpu(cpu) {
3049                        iter->buffer_iter[cpu] =
3050                                ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3051                }
3052                ring_buffer_read_prepare_sync();
3053                for_each_tracing_cpu(cpu) {
3054                        ring_buffer_read_start(iter->buffer_iter[cpu]);
3055                        tracing_iter_reset(iter, cpu);
3056                }
3057        } else {
3058                cpu = iter->cpu_file;
3059                iter->buffer_iter[cpu] =
3060                        ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3061                ring_buffer_read_prepare_sync();
3062                ring_buffer_read_start(iter->buffer_iter[cpu]);
3063                tracing_iter_reset(iter, cpu);
3064        }
3065
3066        mutex_unlock(&trace_types_lock);
3067
3068        return iter;
3069
3070 fail:
3071        mutex_unlock(&trace_types_lock);
3072        kfree(iter->trace);
3073        kfree(iter->buffer_iter);
3074release:
3075        seq_release_private(inode, file);
3076        return ERR_PTR(-ENOMEM);
3077}
3078
3079int tracing_open_generic(struct inode *inode, struct file *filp)
3080{
3081        if (tracing_disabled)
3082                return -ENODEV;
3083
3084        filp->private_data = inode->i_private;
3085        return 0;
3086}
3087
3088bool tracing_is_disabled(void)
3089{
3090        return (tracing_disabled) ? true : false;
3091}
3092
3093/*
3094 * Open and update trace_array ref count.
3095 * Must have the current trace_array passed to it.
3096 */
3097static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3098{
3099        struct trace_array *tr = inode->i_private;
3100
3101        if (tracing_disabled)
3102                return -ENODEV;
3103
3104        if (trace_array_get(tr) < 0)
3105                return -ENODEV;
3106
3107        filp->private_data = inode->i_private;
3108
3109        return 0;
3110}
3111
3112static int tracing_release(struct inode *inode, struct file *file)
3113{
3114        struct trace_array *tr = inode->i_private;
3115        struct seq_file *m = file->private_data;
3116        struct trace_iterator *iter;
3117        int cpu;
3118
3119        if (!(file->f_mode & FMODE_READ)) {
3120                trace_array_put(tr);
3121                return 0;
3122        }
3123
3124        /* Writes do not use seq_file */
3125        iter = m->private;
3126        mutex_lock(&trace_types_lock);
3127
3128        for_each_tracing_cpu(cpu) {
3129                if (iter->buffer_iter[cpu])
3130                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
3131        }
3132
3133        if (iter->trace && iter->trace->close)
3134                iter->trace->close(iter);
3135
3136        if (!iter->snapshot)
3137                /* reenable tracing if it was previously enabled */
3138                tracing_start_tr(tr);
3139
3140        __trace_array_put(tr);
3141
3142        mutex_unlock(&trace_types_lock);
3143
3144        mutex_destroy(&iter->mutex);
3145        free_cpumask_var(iter->started);
3146        kfree(iter->trace);
3147        kfree(iter->buffer_iter);
3148        seq_release_private(inode, file);
3149
3150        return 0;
3151}
3152
3153static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3154{
3155        struct trace_array *tr = inode->i_private;
3156
3157        trace_array_put(tr);
3158        return 0;
3159}
3160
3161static int tracing_single_release_tr(struct inode *inode, struct file *file)
3162{
3163        struct trace_array *tr = inode->i_private;
3164
3165        trace_array_put(tr);
3166
3167        return single_release(inode, file);
3168}
3169
3170static int tracing_open(struct inode *inode, struct file *file)
3171{
3172        struct trace_array *tr = inode->i_private;
3173        struct trace_iterator *iter;
3174        int ret = 0;
3175
3176        if (trace_array_get(tr) < 0)
3177                return -ENODEV;
3178
3179        /* If this file was open for write, then erase contents */
3180        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3181                int cpu = tracing_get_cpu(inode);
3182
3183                if (cpu == RING_BUFFER_ALL_CPUS)
3184                        tracing_reset_online_cpus(&tr->trace_buffer);
3185                else
3186                        tracing_reset(&tr->trace_buffer, cpu);
3187        }
3188
3189        if (file->f_mode & FMODE_READ) {
3190                iter = __tracing_open(inode, file, false);
3191                if (IS_ERR(iter))
3192                        ret = PTR_ERR(iter);
3193                else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3194                        iter->iter_flags |= TRACE_FILE_LAT_FMT;
3195        }
3196
3197        if (ret < 0)
3198                trace_array_put(tr);
3199
3200        return ret;
3201}
3202
3203/*
3204 * Some tracers are not suitable for instance buffers.
3205 * A tracer is always available for the global array (toplevel)
3206 * or if it explicitly states that it is.
3207 */
3208static bool
3209trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3210{
3211        return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3212}
3213
3214/* Find the next tracer that this trace array may use */
3215static struct tracer *
3216get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3217{
3218        while (t && !trace_ok_for_array(t, tr))
3219                t = t->next;
3220
3221        return t;
3222}
3223
3224static void *
3225t_next(struct seq_file *m, void *v, loff_t *pos)
3226{
3227        struct trace_array *tr = m->private;
3228        struct tracer *t = v;
3229
3230        (*pos)++;
3231
3232        if (t)
3233                t = get_tracer_for_array(tr, t->next);
3234
3235        return t;
3236}
3237
3238static void *t_start(struct seq_file *m, loff_t *pos)
3239{
3240        struct trace_array *tr = m->private;
3241        struct tracer *t;
3242        loff_t l = 0;
3243
3244        mutex_lock(&trace_types_lock);
3245
3246        t = get_tracer_for_array(tr, trace_types);
3247        for (; t && l < *pos; t = t_next(m, t, &l))
3248                ;
3249
3250        return t;
3251}
3252
3253static void t_stop(struct seq_file *m, void *p)
3254{
3255        mutex_unlock(&trace_types_lock);
3256}
3257
3258static int t_show(struct seq_file *m, void *v)
3259{
3260        struct tracer *t = v;
3261
3262        if (!t)
3263                return 0;
3264
3265        seq_puts(m, t->name);
3266        if (t->next)
3267                seq_putc(m, ' ');
3268        else
3269                seq_putc(m, '\n');
3270
3271        return 0;
3272}
3273
3274static const struct seq_operations show_traces_seq_ops = {
3275        .start          = t_start,
3276        .next           = t_next,
3277        .stop           = t_stop,
3278        .show           = t_show,
3279};
3280
3281static int show_traces_open(struct inode *inode, struct file *file)
3282{
3283        struct trace_array *tr = inode->i_private;
3284        struct seq_file *m;
3285        int ret;
3286
3287        if (tracing_disabled)
3288                return -ENODEV;
3289
3290        ret = seq_open(file, &show_traces_seq_ops);
3291        if (ret)
3292                return ret;
3293
3294        m = file->private_data;
3295        m->private = tr;
3296
3297        return 0;
3298}
3299
3300static ssize_t
3301tracing_write_stub(struct file *filp, const char __user *ubuf,
3302                   size_t count, loff_t *ppos)
3303{
3304        return count;
3305}
3306
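/*
 * lseek for the trace file: readable opens are forwarded to seq_lseek(),
 * while write-only opens (typically used just to clear the buffer via
 * O_TRUNC) keep the file position pinned at zero.
 */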
3307loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3308{
3309        int ret;
3310
3311        if (file->f_mode & FMODE_READ)
3312                ret = seq_lseek(file, offset, whence);
3313        else
3314                file->f_pos = ret = 0;
3315
3316        return ret;
3317}
3318
3319static const struct file_operations tracing_fops = {
3320        .open           = tracing_open,
3321        .read           = seq_read,
3322        .write          = tracing_write_stub,
3323        .llseek         = tracing_lseek,
3324        .release        = tracing_release,
3325};
3326
3327static const struct file_operations show_traces_fops = {
3328        .open           = show_traces_open,
3329        .read           = seq_read,
3330        .release        = seq_release,
3331        .llseek         = seq_lseek,
3332};
3333
3334/*
3335 * The tracer itself will not take this lock, but we still want
3336 * to provide a consistent cpumask to user-space:
3337 */
3338static DEFINE_MUTEX(tracing_cpumask_update_lock);
3339
3340/*
3341 * Temporary storage for the character representation of the
3342 * CPU bitmask (and one more byte for the newline):
3343 */
3344static char mask_str[NR_CPUS + 1];
3345
3346static ssize_t
3347tracing_cpumask_read(struct file *filp, char __user *ubuf,
3348                     size_t count, loff_t *ppos)
3349{
3350        struct trace_array *tr = file_inode(filp)->i_private;
3351        int len;
3352
3353        mutex_lock(&tracing_cpumask_update_lock);
3354
3355        len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
3356        if (count - len < 2) {
3357                count = -EINVAL;
3358                goto out_err;
3359        }
3360        len += sprintf(mask_str + len, "\n");
3361        count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3362
3363out_err:
3364        mutex_unlock(&tracing_cpumask_update_lock);
3365
3366        return count;
3367}
3368
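/*
 * Writing a hex mask to tracing_cpumask limits tracing to the given CPUs,
 * e.g. "echo 3 > tracing_cpumask" traces only CPUs 0 and 1. CPUs removed
 * from the mask have their per-cpu recording disabled; CPUs added back
 * have it re-enabled.
 */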
3369static ssize_t
3370tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3371                      size_t count, loff_t *ppos)
3372{
3373        struct trace_array *tr = file_inode(filp)->i_private;
3374        cpumask_var_t tracing_cpumask_new;
3375        int err, cpu;
3376
3377        if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3378                return -ENOMEM;
3379
3380        err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3381        if (err)
3382                goto err_unlock;
3383
3384        mutex_lock(&tracing_cpumask_update_lock);
3385
3386        local_irq_disable();
3387        arch_spin_lock(&tr->max_lock);
3388        for_each_tracing_cpu(cpu) {
3389                /*
3390                 * Increase/decrease the disabled counter if we are
3391                 * about to flip a bit in the cpumask:
3392                 */
3393                if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3394                                !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3395                        atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3396                        ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3397                }
3398                if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3399                                cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3400                        atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3401                        ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3402                }
3403        }
3404        arch_spin_unlock(&tr->max_lock);
3405        local_irq_enable();
3406
3407        cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3408
3409        mutex_unlock(&tracing_cpumask_update_lock);
3410        free_cpumask_var(tracing_cpumask_new);
3411
3412        return count;
3413
3414err_unlock:
3415        free_cpumask_var(tracing_cpumask_new);
3416
3417        return err;
3418}
3419
3420static const struct file_operations tracing_cpumask_fops = {
3421        .open           = tracing_open_generic_tr,
3422        .read           = tracing_cpumask_read,
3423        .write          = tracing_cpumask_write,
3424        .release        = tracing_release_generic_tr,
3425        .llseek         = generic_file_llseek,
3426};
3427
3428static int tracing_trace_options_show(struct seq_file *m, void *v)
3429{
3430        struct tracer_opt *trace_opts;
3431        struct trace_array *tr = m->private;
3432        u32 tracer_flags;
3433        int i;
3434
3435        mutex_lock(&trace_types_lock);
3436        tracer_flags = tr->current_trace->flags->val;
3437        trace_opts = tr->current_trace->flags->opts;
3438
3439        for (i = 0; trace_options[i]; i++) {
3440                if (trace_flags & (1 << i))
3441                        seq_printf(m, "%s\n", trace_options[i]);
3442                else
3443                        seq_printf(m, "no%s\n", trace_options[i]);
3444        }
3445
3446        for (i = 0; trace_opts[i].name; i++) {
3447                if (tracer_flags & trace_opts[i].bit)
3448                        seq_printf(m, "%s\n", trace_opts[i].name);
3449                else
3450                        seq_printf(m, "no%s\n", trace_opts[i].name);
3451        }
3452        mutex_unlock(&trace_types_lock);
3453
3454        return 0;
3455}
3456
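/*
 * Flip a single tracer-specific option bit, but only if the tracer
 * accepts the change through its ->set_flag() callback.
 */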
3457static int __set_tracer_option(struct trace_array *tr,
3458                               struct tracer_flags *tracer_flags,
3459                               struct tracer_opt *opts, int neg)
3460{
3461        struct tracer *trace = tr->current_trace;
3462        int ret;
3463
3464        ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3465        if (ret)
3466                return ret;
3467
3468        if (neg)
3469                tracer_flags->val &= ~opts->bit;
3470        else
3471                tracer_flags->val |= opts->bit;
3472        return 0;
3473}
3474
3475/* Try to assign a tracer specific option */
3476static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3477{
3478        struct tracer *trace = tr->current_trace;
3479        struct tracer_flags *tracer_flags = trace->flags;
3480        struct tracer_opt *opts = NULL;
3481        int i;
3482
3483        for (i = 0; tracer_flags->opts[i].name; i++) {
3484                opts = &tracer_flags->opts[i];
3485
3486                if (strcmp(cmp, opts->name) == 0)
3487                        return __set_tracer_option(tr, trace->flags, opts, neg);
3488        }
3489
3490        return -EINVAL;
3491}
3492
3493/* Some tracers require overwrite to stay enabled */
3494int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3495{
3496        if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3497                return -1;
3498
3499        return 0;
3500}
3501
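/*
 * Set or clear one of the global trace_flags bits. The current tracer may
 * veto the change via ->flag_changed(), and some flags carry side effects
 * (cmdline recording, ring buffer overwrite mode, trace_printk comm saving).
 */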
3502int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3503{
3504        /* do nothing if flag is already set */
3505        if (!!(trace_flags & mask) == !!enabled)
3506                return 0;
3507
3508        /* Give the tracer a chance to approve the change */
3509        if (tr->current_trace->flag_changed)
3510                if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3511                        return -EINVAL;
3512
3513        if (enabled)
3514                trace_flags |= mask;
3515        else
3516                trace_flags &= ~mask;
3517
3518        if (mask == TRACE_ITER_RECORD_CMD)
3519                trace_event_enable_cmd_record(enabled);
3520
3521        if (mask == TRACE_ITER_OVERWRITE) {
3522                ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3523#ifdef CONFIG_TRACER_MAX_TRACE
3524                ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3525#endif
3526        }
3527
3528        if (mask == TRACE_ITER_PRINTK)
3529                trace_printk_start_stop_comm(enabled);
3530
3531        return 0;
3532}
3533
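/*
 * Parse one option name written to trace_options. A leading "no" clears
 * the option, e.g. "echo noprint-parent > trace_options". Names that do
 * not match a core option are handed to the current tracer's own options.
 */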
3534static int trace_set_options(struct trace_array *tr, char *option)
3535{
3536        char *cmp;
3537        int neg = 0;
3538        int ret = -ENODEV;
3539        int i;
3540
3541        cmp = strstrip(option);
3542
3543        if (strncmp(cmp, "no", 2) == 0) {
3544                neg = 1;
3545                cmp += 2;
3546        }
3547
3548        mutex_lock(&trace_types_lock);
3549
3550        for (i = 0; trace_options[i]; i++) {
3551                if (strcmp(cmp, trace_options[i]) == 0) {
3552                        ret = set_tracer_flag(tr, 1 << i, !neg);
3553                        break;
3554                }
3555        }
3556
3557        /* If no option could be set, test the specific tracer options */
3558        if (!trace_options[i])
3559                ret = set_tracer_option(tr, cmp, neg);
3560
3561        mutex_unlock(&trace_types_lock);
3562
3563        return ret;
3564}
3565
3566static ssize_t
3567tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3568                        size_t cnt, loff_t *ppos)
3569{
3570        struct seq_file *m = filp->private_data;
3571        struct trace_array *tr = m->private;
3572        char buf[64];
3573        int ret;
3574
3575        if (cnt >= sizeof(buf))
3576                return -EINVAL;
3577
3578        if (copy_from_user(&buf, ubuf, cnt))
3579                return -EFAULT;
3580
3581        buf[cnt] = 0;
3582
3583        ret = trace_set_options(tr, buf);
3584        if (ret < 0)
3585                return ret;
3586
3587        *ppos += cnt;
3588
3589        return cnt;
3590}
3591
3592static int tracing_trace_options_open(struct inode *inode, struct file *file)
3593{
3594        struct trace_array *tr = inode->i_private;
3595        int ret;
3596
3597        if (tracing_disabled)
3598                return -ENODEV;
3599
3600        if (trace_array_get(tr) < 0)
3601                return -ENODEV;
3602
3603        ret = single_open(file, tracing_trace_options_show, inode->i_private);
3604        if (ret < 0)
3605                trace_array_put(tr);
3606
3607        return ret;
3608}
3609
3610static const struct file_operations tracing_iter_fops = {
3611        .open           = tracing_trace_options_open,
3612        .read           = seq_read,
3613        .llseek         = seq_lseek,
3614        .release        = tracing_single_release_tr,
3615        .write          = tracing_trace_options_write,
3616};
3617
3618static const char readme_msg[] =
3619        "tracing mini-HOWTO:\n\n"
3620        "# echo 0 > tracing_on : quick way to disable tracing\n"
3621        "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3622        " Important files:\n"
3623        "  trace\t\t\t- The static contents of the buffer\n"
3624        "\t\t\t  To clear the buffer write into this file: echo > trace\n"
3625        "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3626        "  current_tracer\t- function and latency tracers\n"
3627        "  available_tracers\t- list of configured tracers for current_tracer\n"
3628        "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3629        "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3630        "  trace_clock\t\t-change the clock used to order events\n"
3631        "       local:   Per cpu clock but may not be synced across CPUs\n"
3632        "      global:   Synced across CPUs but slows tracing down.\n"
3633        "     counter:   Not a clock, but just an increment\n"
3634        "      uptime:   Jiffy counter from time of boot\n"
3635        "        perf:   Same clock that perf events use\n"
3636#ifdef CONFIG_X86_64
3637        "     x86-tsc:   TSC cycle counter\n"
3638#endif
3639        "\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3640        "  tracing_cpumask\t- Limit which CPUs to trace\n"
3641        "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3642        "\t\t\t  Remove sub-buffer with rmdir\n"
3643        "  trace_options\t\t- Set format or modify how tracing happens\n"
3644        "\t\t\t  Disable an option by adding a suffix 'no' to the\n"
3645        "\t\t\t  option name\n"
3646        "  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3647#ifdef CONFIG_DYNAMIC_FTRACE
3648        "\n  available_filter_functions - list of functions that can be filtered on\n"
3649        "  set_ftrace_filter\t- echo function name in here to only trace these\n"
3650        "\t\t\t  functions\n"
3651        "\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3652        "\t     modules: Can select a group via module\n"
3653        "\t      Format: :mod:<module-name>\n"
3654        "\t     example: echo :mod:ext3 > set_ftrace_filter\n"
3655        "\t    triggers: a command to perform when function is hit\n"
3656        "\t      Format: <function>:<trigger>[:count]\n"
3657        "\t     trigger: traceon, traceoff\n"
3658        "\t\t      enable_event:<system>:<event>\n"
3659        "\t\t      disable_event:<system>:<event>\n"
3660#ifdef CONFIG_STACKTRACE
3661        "\t\t      stacktrace\n"
3662#endif
3663#ifdef CONFIG_TRACER_SNAPSHOT
3664        "\t\t      snapshot\n"
3665#endif
3666        "\t\t      dump\n"
3667        "\t\t      cpudump\n"
3668        "\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
3669        "\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
3670        "\t     The first one will disable tracing every time do_fault is hit\n"
3671        "\t     The second will disable tracing at most 3 times when do_trap is hit\n"
3672        "\t       The first time do trap is hit and it disables tracing, the\n"
3673        "\t       counter will decrement to 2. If tracing is already disabled,\n"
3674        "\t       the counter will not decrement. It only decrements when the\n"
3675        "\t       trigger did work\n"
3676        "\t     To remove trigger without count:\n"
3677        "\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
3678        "\t     To remove trigger with a count:\n"
3679        "\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
3680        "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3681        "\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3682        "\t    modules: Can select a group via module command :mod:\n"
3683        "\t    Does not accept triggers\n"
3684#endif /* CONFIG_DYNAMIC_FTRACE */
3685#ifdef CONFIG_FUNCTION_TRACER
3686        "  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3687        "\t\t    (function)\n"
3688#endif
3689#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3690        "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3691        "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3692        "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3693#endif
3694#ifdef CONFIG_TRACER_SNAPSHOT
3695        "\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
3696        "\t\t\t  snapshot buffer. Read the contents for more\n"
3697        "\t\t\t  information\n"
3698#endif
3699#ifdef CONFIG_STACK_TRACER
3700        "  stack_trace\t\t- Shows the max stack trace when active\n"
3701        "  stack_max_size\t- Shows current max stack size that was traced\n"
3702        "\t\t\t  Write into this file to reset the max size (trigger a\n"
3703        "\t\t\t  new trace)\n"
3704#ifdef CONFIG_DYNAMIC_FTRACE
3705        "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3706        "\t\t\t  traces\n"
3707#endif
3708#endif /* CONFIG_STACK_TRACER */
3709        "  events/\t\t- Directory containing all trace event subsystems:\n"
3710        "      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3711        "  events/<system>/\t- Directory containing all trace events for <system>:\n"
3712        "      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3713        "\t\t\t  events\n"
3714        "      filter\t\t- If set, only events passing filter are traced\n"
3715        "  events/<system>/<event>/\t- Directory containing control files for\n"
3716        "\t\t\t  <event>:\n"
3717        "      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3718        "      filter\t\t- If set, only events passing filter are traced\n"
3719        "      trigger\t\t- If set, a command to perform when event is hit\n"
3720        "\t    Format: <trigger>[:count][if <filter>]\n"
3721        "\t   trigger: traceon, traceoff\n"
3722        "\t            enable_event:<system>:<event>\n"
3723        "\t            disable_event:<system>:<event>\n"
3724#ifdef CONFIG_STACKTRACE
3725        "\t\t    stacktrace\n"
3726#endif
3727#ifdef CONFIG_TRACER_SNAPSHOT
3728        "\t\t    snapshot\n"
3729#endif
3730        "\t   example: echo traceoff > events/block/block_unplug/trigger\n"
3731        "\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
3732        "\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3733        "\t                  events/block/block_unplug/trigger\n"
3734        "\t   The first disables tracing every time block_unplug is hit.\n"
3735        "\t   The second disables tracing the first 3 times block_unplug is hit.\n"
3736        "\t   The third enables the kmalloc event the first 3 times block_unplug\n"
3737        "\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3738        "\t   Like function triggers, the counter is only decremented if it\n"
3739        "\t    enabled or disabled tracing.\n"
3740        "\t   To remove a trigger without a count:\n"
3741        "\t     echo '!<trigger> > <system>/<event>/trigger\n"
3742        "\t   To remove a trigger with a count:\n"
3743        "\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
3744        "\t   Filters can be ignored when removing a trigger.\n"
3745;
3746
3747static ssize_t
3748tracing_readme_read(struct file *filp, char __user *ubuf,
3749                       size_t cnt, loff_t *ppos)
3750{
3751        return simple_read_from_buffer(ubuf, cnt, ppos,
3752                                        readme_msg, strlen(readme_msg));
3753}
3754
3755static const struct file_operations tracing_readme_fops = {
3756        .open           = tracing_open_generic,
3757        .read           = tracing_readme_read,
3758        .llseek         = generic_file_llseek,
3759};
3760
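/*
 * seq_file iterator for the saved_cmdlines file: walks the pid entries in
 * savedcmd->map_cmdline_to_pid under trace_cmdline_lock and emits one
 * "pid comm" pair per line.
 */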
3761static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3762{
3763        unsigned int *ptr = v;
3764
3765        if (*pos || m->count)
3766                ptr++;
3767
3768        (*pos)++;
3769
3770        for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3771             ptr++) {
3772                if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3773                        continue;
3774
3775                return ptr;
3776        }
3777
3778        return NULL;
3779}
3780
3781static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3782{
3783        void *v;
3784        loff_t l = 0;
3785
3786        preempt_disable();
3787        arch_spin_lock(&trace_cmdline_lock);
3788
3789        v = &savedcmd->map_cmdline_to_pid[0];
3790        while (l <= *pos) {
3791                v = saved_cmdlines_next(m, v, &l);
3792                if (!v)
3793                        return NULL;
3794        }
3795
3796        return v;
3797}
3798
3799static void saved_cmdlines_stop(struct seq_file *m, void *v)
3800{
3801        arch_spin_unlock(&trace_cmdline_lock);
3802        preempt_enable();
3803}
3804
3805static int saved_cmdlines_show(struct seq_file *m, void *v)
3806{
3807        char buf[TASK_COMM_LEN];
3808        unsigned int *pid = v;
3809
3810        __trace_find_cmdline(*pid, buf);
3811        seq_printf(m, "%d %s\n", *pid, buf);
3812        return 0;
3813}
3814
3815static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3816        .start          = saved_cmdlines_start,
3817        .next           = saved_cmdlines_next,
3818        .stop           = saved_cmdlines_stop,
3819        .show           = saved_cmdlines_show,
3820};
3821
3822static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3823{
3824        if (tracing_disabled)
3825                return -ENODEV;
3826
3827        return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3828}
3829
3830static const struct file_operations tracing_saved_cmdlines_fops = {
3831        .open           = tracing_saved_cmdlines_open,
3832        .read           = seq_read,
3833        .llseek         = seq_lseek,
3834        .release        = seq_release,
3835};
3836
3837static ssize_t
3838tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3839                                 size_t cnt, loff_t *ppos)
3840{
3841        char buf[64];
3842        int r;
3843
3844        arch_spin_lock(&trace_cmdline_lock);
3845        r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3846        arch_spin_unlock(&trace_cmdline_lock);
3847
3848        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3849}
3850
3851static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3852{
3853        kfree(s->saved_cmdlines);
3854        kfree(s->map_cmdline_to_pid);
3855        kfree(s);
3856}
3857
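/*
 * Allocate a new cmdline buffer of the requested size, swap it in under
 * trace_cmdline_lock, and free the old buffer outside the lock.
 */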
3858static int tracing_resize_saved_cmdlines(unsigned int val)
3859{
3860        struct saved_cmdlines_buffer *s, *savedcmd_temp;
3861
3862        s = kmalloc(sizeof(*s), GFP_KERNEL);
3863        if (!s)
3864                return -ENOMEM;
3865
3866        if (allocate_cmdlines_buffer(val, s) < 0) {
3867                kfree(s);
3868                return -ENOMEM;
3869        }
3870
3871        arch_spin_lock(&trace_cmdline_lock);
3872        savedcmd_temp = savedcmd;
3873        savedcmd = s;
3874        arch_spin_unlock(&trace_cmdline_lock);
3875        free_saved_cmdlines_buffer(savedcmd_temp);
3876
3877        return 0;
3878}
3879
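/*
 * e.g. "echo 1024 > saved_cmdlines_size" keeps up to 1024 comm-pid
 * mappings; the value must be between 1 and PID_MAX_DEFAULT.
 */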
3880static ssize_t
3881tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3882                                  size_t cnt, loff_t *ppos)
3883{
3884        unsigned long val;
3885        int ret;
3886
3887        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3888        if (ret)
3889                return ret;
3890
3891        /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3892        if (!val || val > PID_MAX_DEFAULT)
3893                return -EINVAL;
3894
3895        ret = tracing_resize_saved_cmdlines((unsigned int)val);
3896        if (ret < 0)
3897                return ret;
3898
3899        *ppos += cnt;
3900
3901        return cnt;
3902}
3903
3904static const struct file_operations tracing_saved_cmdlines_size_fops = {
3905        .open           = tracing_open_generic,
3906        .read           = tracing_saved_cmdlines_size_read,
3907        .write          = tracing_saved_cmdlines_size_write,
3908};
3909
3910static ssize_t
3911tracing_set_trace_read(struct file *filp, char __user *ubuf,
3912                       size_t cnt, loff_t *ppos)
3913{
3914        struct trace_array *tr = filp->private_data;
3915        char buf[MAX_TRACER_SIZE+2];
3916        int r;
3917
3918        mutex_lock(&trace_types_lock);
3919        r = sprintf(buf, "%s\n", tr->current_trace->name);
3920        mutex_unlock(&trace_types_lock);
3921
3922        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3923}
3924
3925int tracer_init(struct tracer *t, struct trace_array *tr)
3926{
3927        tracing_reset_online_cpus(&tr->trace_buffer);
3928        return t->init(tr);
3929}
3930
3931static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3932{
3933        int cpu;
3934
3935        for_each_tracing_cpu(cpu)
3936                per_cpu_ptr(buf->data, cpu)->entries = val;
3937}
3938
3939#ifdef CONFIG_TRACER_MAX_TRACE
3940/* resize @trace_buf's buffer to the size of @size_buf's entries */
3941static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3942                                        struct trace_buffer *size_buf, int cpu_id)
3943{
3944        int cpu, ret = 0;
3945
3946        if (cpu_id == RING_BUFFER_ALL_CPUS) {
3947                for_each_tracing_cpu(cpu) {
3948                        ret = ring_buffer_resize(trace_buf->buffer,
3949                                 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3950                        if (ret < 0)
3951                                break;
3952                        per_cpu_ptr(trace_buf->data, cpu)->entries =
3953                                per_cpu_ptr(size_buf->data, cpu)->entries;
3954                }
3955        } else {
3956                ret = ring_buffer_resize(trace_buf->buffer,
3957                                 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3958                if (ret == 0)
3959                        per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3960                                per_cpu_ptr(size_buf->data, cpu_id)->entries;
3961        }
3962
3963        return ret;
3964}
3965#endif /* CONFIG_TRACER_MAX_TRACE */
3966
3967static int __tracing_resize_ring_buffer(struct trace_array *tr,
3968                                        unsigned long size, int cpu)
3969{
3970        int ret;
3971
3972        /*
3973         * If kernel or user changes the size of the ring buffer
3974         * we use the size that was given, and we can forget about
3975         * expanding it later.
3976         */
3977        ring_buffer_expanded = true;
3978
3979        /* May be called before buffers are initialized */
3980        if (!tr->trace_buffer.buffer)
3981                return 0;
3982
3983        ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3984        if (ret < 0)
3985                return ret;
3986
3987#ifdef CONFIG_TRACER_MAX_TRACE
3988        if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3989            !tr->current_trace->use_max_tr)
3990                goto out;
3991
3992        ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3993        if (ret < 0) {
3994                int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3995                                                     &tr->trace_buffer, cpu);
3996                if (r < 0) {
3997                        /*
3998                         * AARGH! We are left with a max buffer of a
3999                         * different size!
4000                         * The max buffer is our "snapshot" buffer.
4001                         * When a tracer needs a snapshot (one of the
4002                         * latency tracers), it swaps the max buffer
4003                         * with the saved snapshot. We succeeded in
4004                         * updating the size of the main buffer, but failed
4005                         * to update the size of the max buffer. Then, when
4006                         * we tried to reset the main buffer to its original
4007                         * size, we failed there too. This is very unlikely
4008                         * to happen, but if it does, warn and kill all
4009                         * tracing.
4010                         */
4011                        WARN_ON(1);
4012                        tracing_disabled = 1;
4013                }
4014                return ret;
4015        }
4016
4017        if (cpu == RING_BUFFER_ALL_CPUS)
4018                set_buffer_entries(&tr->max_buffer, size);
4019        else
4020                per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4021
4022 out:
4023#endif /* CONFIG_TRACER_MAX_TRACE */
4024
4025        if (cpu == RING_BUFFER_ALL_CPUS)
4026                set_buffer_entries(&tr->trace_buffer, size);
4027        else
4028                per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4029
4030        return ret;
4031}
4032
4033static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4034                                          unsigned long size, int cpu_id)
4035{
4036        int ret = size;
4037
4038        mutex_lock(&trace_types_lock);
4039
4040        if (cpu_id != RING_BUFFER_ALL_CPUS) {
4041                /* make sure this cpu is enabled in the mask */
4042                if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4043                        ret = -EINVAL;
4044                        goto out;
4045                }
4046        }
4047
4048        ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4049        if (ret < 0)
4050                ret = -ENOMEM;
4051
4052out:
4053        mutex_unlock(&trace_types_lock);
4054
4055        return ret;
4056}
4057
4058
4059/**
4060 * tracing_update_buffers - used by tracing facility to expand ring buffers
4061 *
4062 * To save memory when tracing is never used on a system that has it
4063 * configured in, the ring buffers are set to a minimum size. Once
4064 * a user starts to use the tracing facility, they need to grow
4065 * to their default size.
4066 *
4067 * This function is to be called when a tracer is about to be used.
4068 */
4069int tracing_update_buffers(void)
4070{
4071        int ret = 0;
4072
4073        mutex_lock(&trace_types_lock);
4074        if (!ring_buffer_expanded)
4075                ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4076                                                RING_BUFFER_ALL_CPUS);
4077        mutex_unlock(&trace_types_lock);
4078
4079        return ret;
4080}
4081
4082struct trace_option_dentry;
4083
4084static struct trace_option_dentry *
4085create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4086
4087static void
4088destroy_trace_option_files(struct trace_option_dentry *topts);
4089
4090/*
4091 * Used to clear out the tracer before deletion of an instance.
4092 * Must have trace_types_lock held.
4093 */
4094static void tracing_set_nop(struct trace_array *tr)
4095{
4096        if (tr->current_trace == &nop_trace)
4097                return;
4098
4099        tr->current_trace->enabled--;
4100
4101        if (tr->current_trace->reset)
4102                tr->current_trace->reset(tr);
4103
4104        tr->current_trace = &nop_trace;
4105}
4106
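/*
 * Switch the current tracer, e.g. "echo function > current_tracer":
 * expand the ring buffer if needed, shut down the old tracer, free or
 * allocate the snapshot (max) buffer as required, rebuild the tracer
 * option files for the top-level instance, then init the new tracer.
 */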
4107static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4108{
4109        static struct trace_option_dentry *topts;
4110        struct tracer *t;
4111#ifdef CONFIG_TRACER_MAX_TRACE
4112        bool had_max_tr;
4113#endif
4114        int ret = 0;
4115
4116        mutex_lock(&trace_types_lock);
4117
4118        if (!ring_buffer_expanded) {
4119                ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4120                                                RING_BUFFER_ALL_CPUS);
4121                if (ret < 0)
4122                        goto out;
4123                ret = 0;
4124        }
4125
4126        for (t = trace_types; t; t = t->next) {
4127                if (strcmp(t->name, buf) == 0)
4128                        break;
4129        }
4130        if (!t) {
4131                ret = -EINVAL;
4132                goto out;
4133        }
4134        if (t == tr->current_trace)
4135                goto out;
4136
4137        /* Some tracers are only allowed for the top level buffer */
4138        if (!trace_ok_for_array(t, tr)) {
4139                ret = -EINVAL;
4140                goto out;
4141        }
4142
4143        trace_branch_disable();
4144
4145        tr->current_trace->enabled--;
4146
4147        if (tr->current_trace->reset)
4148                tr->current_trace->reset(tr);
4149
4150        /* Current trace needs to be nop_trace before synchronize_sched */
4151        tr->current_trace = &nop_trace;
4152
4153#ifdef CONFIG_TRACER_MAX_TRACE
4154        had_max_tr = tr->allocated_snapshot;
4155
4156        if (had_max_tr && !t->use_max_tr) {
4157                /*
4158                 * We need to make sure that the update_max_tr sees that
4159                 * current_trace changed to nop_trace to keep it from
4160                 * swapping the buffers after we resize it.
4161                 * update_max_tr() is called with interrupts disabled,
4162                 * so a synchronize_sched() is sufficient.
4163                 */
4164                synchronize_sched();
4165                free_snapshot(tr);
4166        }
4167#endif
4168        /* Currently, only the top instance has options */
4169        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4170                destroy_trace_option_files(topts);
4171                topts = create_trace_option_files(tr, t);
4172        }
4173
4174#ifdef CONFIG_TRACER_MAX_TRACE
4175        if (t->use_max_tr && !had_max_tr) {
4176                ret = alloc_snapshot(tr);
4177                if (ret < 0)
4178                        goto out;
4179        }
4180#endif
4181
4182        if (t->init) {
4183                ret = tracer_init(t, tr);
4184                if (ret)
4185                        goto out;
4186        }
4187
4188        tr->current_trace = t;
4189        tr->current_trace->enabled++;
4190        trace_branch_enable(tr);
4191 out:
4192        mutex_unlock(&trace_types_lock);
4193
4194        return ret;
4195}
4196
4197static ssize_t
4198tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4199                        size_t cnt, loff_t *ppos)
4200{
4201        struct trace_array *tr = filp->private_data;
4202        char buf[MAX_TRACER_SIZE+1];
4203        int i;
4204        size_t ret;
4205        int err;
4206
4207        ret = cnt;
4208
4209        if (cnt > MAX_TRACER_SIZE)
4210                cnt = MAX_TRACER_SIZE;
4211
4212        if (copy_from_user(&buf, ubuf, cnt))
4213                return -EFAULT;
4214
4215        buf[cnt] = 0;
4216
4217        /* strip trailing whitespace. */
4218        for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4219                buf[i] = 0;
4220
4221        err = tracing_set_tracer(tr, buf);
4222        if (err)
4223                return err;
4224
4225        *ppos += ret;
4226
4227        return ret;
4228}
4229
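/*
 * Helpers for files that store a value in nanoseconds but expose it in
 * microseconds (tracing_thresh and the max latency file below): reads
 * convert nsecs to usecs (-1 means "not set"), writes take usecs and
 * store nsecs.
 */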
4230static ssize_t
4231tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4232                   size_t cnt, loff_t *ppos)
4233{
4234        char buf[64];
4235        int r;
4236
4237        r = snprintf(buf, sizeof(buf), "%ld\n",
4238                     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4239        if (r > sizeof(buf))
4240                r = sizeof(buf);
4241        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4242}
4243
4244static ssize_t
4245tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4246                    size_t cnt, loff_t *ppos)
4247{
4248        unsigned long val;
4249        int ret;
4250
4251        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4252        if (ret)
4253                return ret;
4254
4255        *ptr = val * 1000;
4256
4257        return cnt;
4258}
4259
4260static ssize_t
4261tracing_thresh_read(struct file *filp, char __user *ubuf,
4262                    size_t cnt, loff_t *ppos)
4263{
4264        return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4265}
4266
4267static ssize_t
4268tracing_thresh_write(struct file *filp, const char __user *ubuf,
4269                     size_t cnt, loff_t *ppos)
4270{
4271        struct trace_array *tr = filp->private_data;
4272        int ret;
4273
4274        mutex_lock(&trace_types_lock);
4275        ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4276        if (ret < 0)
4277                goto out;
4278
4279        if (tr->current_trace->update_thresh) {
4280                ret = tr->current_trace->update_thresh(tr);
4281                if (ret < 0)
4282                        goto out;
4283        }
4284
4285        ret = cnt;
4286out:
4287        mutex_unlock(&trace_types_lock);
4288
4289        return ret;
4290}
4291
4292static ssize_t
4293tracing_max_lat_read(struct file *filp, char __user *ubuf,
4294                     size_t cnt, loff_t *ppos)
4295{
4296        return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4297}
4298
4299static ssize_t
4300tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4301                      size_t cnt, loff_t *ppos)
4302{
4303        return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4304}
4305
4306static int tracing_open_pipe(struct inode *inode, struct file *filp)
4307{
4308        struct trace_array *tr = inode->i_private;
4309        struct trace_iterator *iter;
4310        int ret = 0;
4311
4312        if (tracing_disabled)
4313                return -ENODEV;
4314
4315        if (trace_array_get(tr) < 0)
4316                return -ENODEV;
4317
4318        mutex_lock(&trace_types_lock);
4319
4320        /* create a buffer to store the information to pass to userspace */
4321        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4322        if (!iter) {
4323                ret = -ENOMEM;
4324                __trace_array_put(tr);
4325                goto out;
4326        }
4327
4328        trace_seq_init(&iter->seq);
4329
4330        /*
4331         * We make a copy of the current tracer to avoid concurrent
4332         * changes on it while we are reading.
4333         */
4334        iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4335        if (!iter->trace) {
4336                ret = -ENOMEM;
4337                goto fail;
4338        }
4339        *iter->trace = *tr->current_trace;
4340
4341        if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4342                ret = -ENOMEM;
4343                goto fail;
4344        }
4345
4346        /* trace pipe does not show start of buffer */
4347        cpumask_setall(iter->started);
4348
4349        if (trace_flags & TRACE_ITER_LATENCY_FMT)
4350                iter->iter_flags |= TRACE_FILE_LAT_FMT;
4351
4352        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4353        if (trace_clocks[tr->clock_id].in_ns)
4354                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4355
4356        iter->tr = tr;
4357        iter->trace_buffer = &tr->trace_buffer;
4358        iter->cpu_file = tracing_get_cpu(inode);
4359        mutex_init(&iter->mutex);
4360        filp->private_data = iter;
4361
4362        if (iter->trace->pipe_open)
4363                iter->trace->pipe_open(iter);
4364
4365        nonseekable_open(inode, filp);
4366out:
4367        mutex_unlock(&trace_types_lock);
4368        return ret;
4369
4370fail:
4371        kfree(iter->trace);
4372        kfree(iter);
4373        __trace_array_put(tr);
4374        mutex_unlock(&trace_types_lock);
4375        return ret;
4376}
4377
4378static int tracing_release_pipe(struct inode *inode, struct file *file)
4379{
4380        struct trace_iterator *iter = file->private_data;
4381        struct trace_array *tr = inode->i_private;
4382
4383        mutex_lock(&trace_types_lock);
4384
4385        if (iter->trace->pipe_close)
4386                iter->trace->pipe_close(iter);
4387
4388        mutex_unlock(&trace_types_lock);
4389
4390        free_cpumask_var(iter->started);
4391        mutex_destroy(&iter->mutex);
4392        kfree(iter->trace);
4393        kfree(iter);
4394
4395        trace_array_put(tr);
4396
4397        return 0;
4398}
4399
4400static unsigned int
4401trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4402{
4403        /* Iterators are static, they should be filled or empty */
4404        if (trace_buffer_iter(iter, iter->cpu_file))
4405                return POLLIN | POLLRDNORM;
4406
4407        if (trace_flags & TRACE_ITER_BLOCK)
4408                /*
4409                 * Always select as readable when in blocking mode
4410                 */
4411                return POLLIN | POLLRDNORM;
4412        else
4413                return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4414                                             filp, poll_table);
4415}
4416
4417static unsigned int
4418tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4419{
4420        struct trace_iterator *iter = filp->private_data;
4421
4422        return trace_poll(iter, filp, poll_table);
4423}
4424
4425/* Must be called with iter->mutex held; the mutex is dropped while waiting. */
4426static int tracing_wait_pipe(struct file *filp)
4427{
4428        struct trace_iterator *iter = filp->private_data;
4429        int ret;
4430
4431        while (trace_empty(iter)) {
4432
4433                if ((filp->f_flags & O_NONBLOCK)) {
4434                        return -EAGAIN;
4435                }
4436
4437                /*
4438                 * We block until we read something and tracing is disabled.
4439                 * We still block if tracing is disabled, but we have never
4440                 * read anything. This allows a user to cat this file, and
4441                 * then enable tracing. But after we have read something,
4442                 * we give an EOF when tracing is again disabled.
4443                 *
4444                 * iter->pos will be 0 if we haven't read anything.
4445                 */
4446                if (!tracing_is_on() && iter->pos)
4447                        break;
4448
4449                mutex_unlock(&iter->mutex);
4450
4451                ret = wait_on_pipe(iter, false);
4452
4453                mutex_lock(&iter->mutex);
4454
4455                if (ret)
4456                        return ret;
4457        }
4458
4459        return 1;
4460}
4461
4462/*
4463 * Consumer reader.
4464 */
4465static ssize_t
4466tracing_read_pipe(struct file *filp, char __user *ubuf,
4467                  size_t cnt, loff_t *ppos)
4468{
4469        struct trace_iterator *iter = filp->private_data;
4470        struct trace_array *tr = iter->tr;
4471        ssize_t sret;
4472
4473        /* return any leftover data */
4474        sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4475        if (sret != -EBUSY)
4476                return sret;
4477
4478        trace_seq_init(&iter->seq);
4479
4480        /* copy the tracer to avoid using a global lock all around */
4481        mutex_lock(&trace_types_lock);
4482        if (unlikely(iter->trace->name != tr->current_trace->name))
4483                *iter->trace = *tr->current_trace;
4484        mutex_unlock(&trace_types_lock);
4485
4486        /*
4487         * Avoid more than one consumer on a single file descriptor.
4488         * This is just a matter of trace coherency; the ring buffer itself
4489         * is protected.
4490         */
4491        mutex_lock(&iter->mutex);
4492        if (iter->trace->read) {
4493                sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4494                if (sret)
4495                        goto out;
4496        }
4497
4498waitagain:
4499        sret = tracing_wait_pipe(filp);
4500        if (sret <= 0)
4501                goto out;
4502
4503        /* stop when tracing is finished */
4504        if (trace_empty(iter)) {
4505                sret = 0;
4506                goto out;
4507        }
4508
4509        if (cnt >= PAGE_SIZE)
4510                cnt = PAGE_SIZE - 1;
4511
4512        /* reset all but tr, trace, and overruns */
4513        memset(&iter->seq, 0,
4514               sizeof(struct trace_iterator) -
4515               offsetof(struct trace_iterator, seq));
4516        cpumask_clear(iter->started);
4517        iter->pos = -1;
4518
4519        trace_event_read_lock();
4520        trace_access_lock(iter->cpu_file);
4521        while (trace_find_next_entry_inc(iter) != NULL) {
4522                enum print_line_t ret;
4523                int save_len = iter->seq.seq.len;
4524
4525                ret = print_trace_line(iter);
4526                if (ret == TRACE_TYPE_PARTIAL_LINE) {
4527                        /* don't print partial lines */
4528                        iter->seq.seq.len = save_len;
4529                        break;
4530                }
4531                if (ret != TRACE_TYPE_NO_CONSUME)
4532                        trace_consume(iter);
4533
4534                if (trace_seq_used(&iter->seq) >= cnt)
4535                        break;
4536
4537                /*
4538                 * Setting the full flag means we reached the trace_seq buffer
4539                 * size and we should leave by partial output condition above.
4540                 * One of the trace_seq_* functions is not used properly.
4541                 */
4542                WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4543                          iter->ent->type);
4544        }
4545        trace_access_unlock(iter->cpu_file);
4546        trace_event_read_unlock();
4547
4548        /* Now copy what we have to the user */
4549        sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4550        if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4551                trace_seq_init(&iter->seq);
4552
4553        /*
4554         * If there was nothing to send to user, in spite of consuming trace
4555         * entries, go back to wait for more entries.
4556         */
4557        if (sret == -EBUSY)
4558                goto waitagain;
4559
4560out:
4561        mutex_unlock(&iter->mutex);
4562
4563        return sret;
4564}
4565
4566static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4567                                     unsigned int idx)
4568{
4569        __free_page(spd->pages[idx]);
4570}
4571
4572static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4573        .can_merge              = 0,
4574        .confirm                = generic_pipe_buf_confirm,
4575        .release                = generic_pipe_buf_release,
4576        .steal                  = generic_pipe_buf_steal,
4577        .get                    = generic_pipe_buf_get,
4578};
4579
4580static size_t
4581tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4582{
4583        size_t count;
4584        int save_len;
4585        int ret;
4586
4587        /* Seq buffer is page-sized, exactly what we need. */
4588        for (;;) {
4589                save_len = iter->seq.seq.len;
4590                ret = print_trace_line(iter);
4591
4592                if (trace_seq_has_overflowed(&iter->seq)) {
4593                        iter->seq.seq.len = save_len;
4594                        break;
4595                }
4596
4597                /*
4598                 * This should not be hit, because it should only
4599                 * be set if the iter->seq overflowed. But check it
4600                 * anyway to be safe.
4601                 */
4602                if (ret == TRACE_TYPE_PARTIAL_LINE) {
4603                        iter->seq.seq.len = save_len;
4604                        break;
4605                }
4606
4607                count = trace_seq_used(&iter->seq) - save_len;
4608                if (rem < count) {
4609                        rem = 0;
4610                        iter->seq.seq.len = save_len;
4611                        break;
4612                }
4613
4614                if (ret != TRACE_TYPE_NO_CONSUME)
4615                        trace_consume(iter);
4616                rem -= count;
4617                if (!trace_find_next_entry_inc(iter))   {
4618                        rem = 0;
4619                        iter->ent = NULL;
4620                        break;
4621                }
4622        }
4623
4624        return rem;
4625}
4626
4627static ssize_t tracing_splice_read_pipe(struct file *filp,
4628                                        loff_t *ppos,
4629                                        struct pipe_inode_info *pipe,
4630                                        size_t len,
4631                                        unsigned int flags)
4632{
4633        struct page *pages_def[PIPE_DEF_BUFFERS];
4634        struct partial_page partial_def[PIPE_DEF_BUFFERS];
4635        struct trace_iterator *iter = filp->private_data;
4636        struct splice_pipe_desc spd = {
4637                .pages          = pages_def,
4638                .partial        = partial_def,
4639                .nr_pages       = 0, /* This gets updated below. */
4640                .nr_pages_max   = PIPE_DEF_BUFFERS,
4641                .flags          = flags,
4642                .ops            = &tracing_pipe_buf_ops,
4643                .spd_release    = tracing_spd_release_pipe,
4644        };
4645        struct trace_array *tr = iter->tr;
4646        ssize_t ret;
4647        size_t rem;
4648        unsigned int i;
4649
4650        if (splice_grow_spd(pipe, &spd))
4651                return -ENOMEM;
4652
4653        /* copy the tracer to avoid using a global lock all around */
4654        mutex_lock(&trace_types_lock);
4655        if (unlikely(iter->trace->name != tr->current_trace->name))
4656                *iter->trace = *tr->current_trace;
4657        mutex_unlock(&trace_types_lock);
4658
4659        mutex_lock(&iter->mutex);
4660
4661        if (iter->trace->splice_read) {
4662                ret = iter->trace->splice_read(iter, filp,
4663                                               ppos, pipe, len, flags);
4664                if (ret)
4665                        goto out_err;
4666        }
4667
4668        ret = tracing_wait_pipe(filp);
4669        if (ret <= 0)
4670                goto out_err;
4671
4672        if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4673                ret = -EFAULT;
4674                goto out_err;
4675        }
4676
4677        trace_event_read_lock();
4678        trace_access_lock(iter->cpu_file);
4679
4680        /* Fill as many pages as possible. */
4681        for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4682                spd.pages[i] = alloc_page(GFP_KERNEL);
4683                if (!spd.pages[i])
4684                        break;
4685
4686                rem = tracing_fill_pipe_page(rem, iter);
4687
4688                /* Copy the data into the page, so we can start over. */
4689                ret = trace_seq_to_buffer(&iter->seq,
4690                                          page_address(spd.pages[i]),
4691                                          trace_seq_used(&iter->seq));
4692                if (ret < 0) {
4693                        __free_page(spd.pages[i]);
4694                        break;
4695                }
4696                spd.partial[i].offset = 0;
4697                spd.partial[i].len = trace_seq_used(&iter->seq);
4698
4699                trace_seq_init(&iter->seq);
4700        }
4701
4702        trace_access_unlock(iter->cpu_file);
4703        trace_event_read_unlock();
4704        mutex_unlock(&iter->mutex);
4705
4706        spd.nr_pages = i;
4707
4708        ret = splice_to_pipe(pipe, &spd);
4709out:
4710        splice_shrink_spd(&spd);
4711        return ret;
4712
4713out_err:
4714        mutex_unlock(&iter->mutex);
4715        goto out;
4716}
4717
4718static ssize_t
4719tracing_entries_read(struct file *filp, char __user *ubuf,
4720                     size_t cnt, loff_t *ppos)
4721{
4722        struct inode *inode = file_inode(filp);
4723        struct trace_array *tr = inode->i_private;
4724        int cpu = tracing_get_cpu(inode);
4725        char buf[64];
4726        int r = 0;
4727        ssize_t ret;
4728
4729        mutex_lock(&trace_types_lock);
4730
4731        if (cpu == RING_BUFFER_ALL_CPUS) {
4732                int cpu, buf_size_same;
4733                unsigned long size;
4734
4735                size = 0;
4736                buf_size_same = 1;
4737                /* check if all cpu sizes are the same */
4738                for_each_tracing_cpu(cpu) {
4739                        /* fill in the size from first enabled cpu */
4740                        if (size == 0)
4741                                size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4742                        if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4743                                buf_size_same = 0;
4744                                break;
4745                        }
4746                }
4747
4748                if (buf_size_same) {
4749                        if (!ring_buffer_expanded)
4750                                r = sprintf(buf, "%lu (expanded: %lu)\n",
4751                                            size >> 10,
4752                                            trace_buf_size >> 10);
4753                        else
4754                                r = sprintf(buf, "%lu\n", size >> 10);
4755                } else
4756                        r = sprintf(buf, "X\n");
4757        } else
4758                r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4759
4760        mutex_unlock(&trace_types_lock);
4761
4762        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4763        return ret;
4764}
4765
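/*
 * Writing to buffer_size_kb resizes the ring buffer. The value is in KB
 * per cpu, e.g. "echo 4096 > buffer_size_kb" gives each cpu a 4MB buffer.
 */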
4766static ssize_t
4767tracing_entries_write(struct file *filp, const char __user *ubuf,
4768                      size_t cnt, loff_t *ppos)
4769{
4770        struct inode *inode = file_inode(filp);
4771        struct trace_array *tr = inode->i_private;
4772        unsigned long val;
4773        int ret;
4774
4775        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4776        if (ret)
4777                return ret;
4778
4779        /* must have at least 1 entry */
4780        if (!val)
4781                return -EINVAL;
4782
4783        /* value is in KB */
4784        val <<= 10;
4785        ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4786        if (ret < 0)
4787                return ret;
4788
4789        *ppos += cnt;
4790
4791        return cnt;
4792}
4793
4794static ssize_t
4795tracing_total_entries_read(struct file *filp, char __user *ubuf,
4796                                size_t cnt, loff_t *ppos)
4797{
4798        struct trace_array *tr = filp->private_data;
4799        char buf[64];
4800        int r, cpu;
4801        unsigned long size = 0, expanded_size = 0;
4802
4803        mutex_lock(&trace_types_lock);
4804        for_each_tracing_cpu(cpu) {
4805                size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4806                if (!ring_buffer_expanded)
4807                        expanded_size += trace_buf_size >> 10;
4808        }
4809        if (ring_buffer_expanded)
4810                r = sprintf(buf, "%lu\n", size);
4811        else
4812                r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4813        mutex_unlock(&trace_types_lock);
4814
4815        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4816}
4817
4818static ssize_t
4819tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4820                          size_t cnt, loff_t *ppos)
4821{
4822        /*
4823         * There is no need to read what the user has written; this function
4824         * exists just so that "echo" does not report an error when used.
4825         */
4826
4827        *ppos += cnt;
4828
4829        return cnt;
4830}
4831
4832static int
4833tracing_free_buffer_release(struct inode *inode, struct file *filp)
4834{
4835        struct trace_array *tr = inode->i_private;
4836
4837        /* disable tracing? */
4838        if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4839                tracer_tracing_off(tr);
4840        /* resize the ring buffer to 0 */
4841        tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4842
4843        trace_array_put(tr);
4844
4845        return 0;
4846}
4847
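/*
 * Handle writes to trace_marker, e.g. "echo hello > trace_marker": the
 * text is copied straight from the pinned user pages into the ring buffer
 * as a TRACE_PRINT event, with a newline appended if one is missing.
 */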
4848static ssize_t
4849tracing_mark_write(struct file *filp, const char __user *ubuf,
4850                                        size_t cnt, loff_t *fpos)
4851{
4852        unsigned long addr = (unsigned long)ubuf;
4853        struct trace_array *tr = filp->private_data;
4854        struct ring_buffer_event *event;
4855        struct ring_buffer *buffer;
4856        struct print_entry *entry;
4857        unsigned long irq_flags;
4858        struct page *pages[2];
4859        void *map_page[2];
4860        int nr_pages = 1;
4861        ssize_t written;
4862        int offset;
4863        int size;
4864        int len;
4865        int ret;
4866        int i;
4867
4868        if (tracing_disabled)
4869                return -EINVAL;
4870
4871        if (!(trace_flags & TRACE_ITER_MARKERS))
4872                return -EINVAL;
4873
4874        if (cnt > TRACE_BUF_SIZE)
4875                cnt = TRACE_BUF_SIZE;
4876
4877        /*
4878         * Userspace is injecting traces into the kernel trace buffer.
4879         * We want to be as non-intrusive as possible.
4880         * To do so, we do not want to allocate any special buffers
4881         * or take any locks, but instead write the userspace data
4882         * straight into the ring buffer.
4883         *
4884         * First we need to pin the userspace buffer into memory.
4885         * Most likely it already is, because the caller just referenced it.
4886         * But there's no guarantee that it is. By using get_user_pages_fast()
4887         * and kmap_atomic/kunmap_atomic() we can get access to the
4888         * pages directly. We then write the data directly into the
4889         * ring buffer.
4890         */
4891        BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4892
4893        /* check if we cross pages */
4894        if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4895                nr_pages = 2;
4896
4897        offset = addr & (PAGE_SIZE - 1);
4898        addr &= PAGE_MASK;
4899
4900        ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4901        if (ret < nr_pages) {
4902                while (--ret >= 0)
4903                        put_page(pages[ret]);
4904                written = -EFAULT;
4905                goto out;
4906        }
4907
4908        for (i = 0; i < nr_pages; i++)
4909                map_page[i] = kmap_atomic(pages[i]);
4910
4911        local_save_flags(irq_flags);
4912        size = sizeof(*entry) + cnt + 2; /* possible \n added */
4913        buffer = tr->trace_buffer.buffer;
4914        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4915                                          irq_flags, preempt_count());
4916        if (!event) {
4917                /* Ring buffer disabled, return as if not open for write */
4918                written = -EBADF;
4919                goto out_unlock;
4920        }
4921
4922        entry = ring_buffer_event_data(event);
4923        entry->ip = _THIS_IP_;
4924
4925        if (nr_pages == 2) {
4926                len = PAGE_SIZE - offset;
4927                memcpy(&entry->buf, map_page[0] + offset, len);
4928                memcpy(&entry->buf[len], map_page[1], cnt - len);
4929        } else
4930                memcpy(&entry->buf, map_page[0] + offset, cnt);
4931
4932        if (entry->buf[cnt - 1] != '\n') {
4933                entry->buf[cnt] = '\n';
4934                entry->buf[cnt + 1] = '\0';
4935        } else
4936                entry->buf[cnt] = '\0';
4937
4938        __buffer_unlock_commit(buffer, event);
4939
4940        written = cnt;
4941
4942        *fpos += written;
4943
4944 out_unlock:
4945        for (i = 0; i < nr_pages; i++) {
4946                kunmap_atomic(map_page[i]);
4947                put_page(pages[i]);
4948        }
4949 out:
4950        return written;
4951}
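
/*
 * Illustrative, standalone userspace sketch (under "#if 0", not part of
 * trace.c): writing a marker through the trace_marker file served by
 * tracing_mark_write() above.  The string is copied straight from the
 * pinned user pages into the ring buffer.  The debugfs mount point below
 * is an assumption.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void trace_mark(const char *msg)
{
        int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

        if (fd < 0)
                return;
        /* At most TRACE_BUF_SIZE bytes are kept; a '\n' is appended if missing. */
        write(fd, msg, strlen(msg));
        close(fd);
}
#endif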
4952
4953static int tracing_clock_show(struct seq_file *m, void *v)
4954{
4955        struct trace_array *tr = m->private;
4956        int i;
4957
4958        for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4959                seq_printf(m,
4960                        "%s%s%s%s", i ? " " : "",
4961                        i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4962                        i == tr->clock_id ? "]" : "");
4963        seq_putc(m, '\n');
4964
4965        return 0;
4966}
4967
4968static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4969{
4970        int i;
4971
4972        for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4973                if (strcmp(trace_clocks[i].name, clockstr) == 0)
4974                        break;
4975        }
4976        if (i == ARRAY_SIZE(trace_clocks))
4977                return -EINVAL;
4978
4979        mutex_lock(&trace_types_lock);
4980
4981        tr->clock_id = i;
4982
4983        ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4984
4985        /*
4986         * New clock may not be consistent with the previous clock.
4987         * Reset the buffer so that it doesn't have incomparable timestamps.
4988         */
4989        tracing_reset_online_cpus(&tr->trace_buffer);
4990
4991#ifdef CONFIG_TRACER_MAX_TRACE
4992        if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4993                ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4994        tracing_reset_online_cpus(&tr->max_buffer);
4995#endif
4996
4997        mutex_unlock(&trace_types_lock);
4998
4999        return 0;
5000}
5001
5002static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5003                                   size_t cnt, loff_t *fpos)
5004{
5005        struct seq_file *m = filp->private_data;
5006        struct trace_array *tr = m->private;
5007        char buf[64];
5008        const char *clockstr;
5009        int ret;
5010
5011        if (cnt >= sizeof(buf))
5012                return -EINVAL;
5013
5014        if (copy_from_user(&buf, ubuf, cnt))
5015                return -EFAULT;
5016
5017        buf[cnt] = 0;
5018
5019        clockstr = strstrip(buf);
5020
5021        ret = tracing_set_clock(tr, clockstr);
5022        if (ret)
5023                return ret;
5024
5025        *fpos += cnt;
5026
5027        return cnt;
5028}
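
/*
 * Illustrative, standalone userspace sketch (under "#if 0", not part of
 * trace.c): selecting a trace clock through the trace_clock file handled
 * by tracing_clock_write() above.  "global" is used as an example name on
 * the assumption that it appears in trace_clocks[]; reading the file back
 * shows the active clock in square brackets.  The debugfs mount point is
 * an assumption.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_trace_clock(const char *name)
{
        int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);
        int ret = -1;

        if (fd < 0)
                return -1;
        /* tracing_set_clock() resets the buffers, since timestamps taken
         * with the old clock are not comparable with the new one. */
        if (write(fd, name, strlen(name)) > 0)
                ret = 0;
        close(fd);
        return ret;
}
#endif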
5029
5030static int tracing_clock_open(struct inode *inode, struct file *file)
5031{
5032        struct trace_array *tr = inode->i_private;
5033        int ret;
5034
5035        if (tracing_disabled)
5036                return -ENODEV;
5037
5038        if (trace_array_get(tr))
5039                return -ENODEV;
5040
5041        ret = single_open(file, tracing_clock_show, inode->i_private);
5042        if (ret < 0)
5043                trace_array_put(tr);
5044
5045        return ret;
5046}
5047
5048struct ftrace_buffer_info {
5049        struct trace_iterator   iter;
5050        void                    *spare;
5051        unsigned int            read;
5052};
5053
5054#ifdef CONFIG_TRACER_SNAPSHOT
5055static int tracing_snapshot_open(struct inode *inode, struct file *file)
5056{
5057        struct trace_array *tr = inode->i_private;
5058        struct trace_iterator *iter;
5059        struct seq_file *m;
5060        int ret = 0;
5061
5062        if (trace_array_get(tr) < 0)
5063                return -ENODEV;
5064
5065        if (file->f_mode & FMODE_READ) {
5066                iter = __tracing_open(inode, file, true);
5067                if (IS_ERR(iter))
5068                        ret = PTR_ERR(iter);
5069        } else {
5070                /* Writes still need the seq_file to hold the private data */
5071                ret = -ENOMEM;
5072                m = kzalloc(sizeof(*m), GFP_KERNEL);
5073                if (!m)
5074                        goto out;
5075                iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5076                if (!iter) {
5077                        kfree(m);
5078                        goto out;
5079                }
5080                ret = 0;
5081
5082                iter->tr = tr;
5083                iter->trace_buffer = &tr->max_buffer;
5084                iter->cpu_file = tracing_get_cpu(inode);
5085                m->private = iter;
5086                file->private_data = m;
5087        }
5088out:
5089        if (ret < 0)
5090                trace_array_put(tr);
5091
5092        return ret;
5093}
5094
5095static ssize_t
5096tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5097                       loff_t *ppos)
5098{
5099        struct seq_file *m = filp->private_data;
5100        struct trace_iterator *iter = m->private;
5101        struct trace_array *tr = iter->tr;
5102        unsigned long val;
5103        int ret;
5104
5105        ret = tracing_update_buffers();
5106        if (ret < 0)
5107                return ret;
5108
5109        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5110        if (ret)
5111                return ret;
5112
5113        mutex_lock(&trace_types_lock);
5114
5115        if (tr->current_trace->use_max_tr) {
5116                ret = -EBUSY;
5117                goto out;
5118        }
5119
5120        switch (val) {
5121        case 0:
5122                if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5123                        ret = -EINVAL;
5124                        break;
5125                }
5126                if (tr->allocated_snapshot)
5127                        free_snapshot(tr);
5128                break;
5129        case 1:
5130/* Only allow per-cpu swap if the ring buffer supports it */
5131#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5132                if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5133                        ret = -EINVAL;
5134                        break;
5135                }
5136#endif
5137                if (!tr->allocated_snapshot) {
5138                        ret = alloc_snapshot(tr);
5139                        if (ret < 0)
5140                                break;
5141                }
5142                local_irq_disable();
5143                /* Now, we're going to swap */
5144                if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5145                        update_max_tr(tr, current, smp_processor_id());
5146                else
5147                        update_max_tr_single(tr, current, iter->cpu_file);
5148                local_irq_enable();
5149                break;
5150        default:
5151                if (tr->allocated_snapshot) {
5152                        if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5153                                tracing_reset_online_cpus(&tr->max_buffer);
5154                        else
5155                                tracing_reset(&tr->max_buffer, iter->cpu_file);
5156                }
5157                break;
5158        }
5159
5160        if (ret >= 0) {
5161                *ppos += cnt;
5162                ret = cnt;
5163        }
5164out:
5165        mutex_unlock(&trace_types_lock);
5166        return ret;
5167}
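
/*
 * Illustrative, standalone userspace sketch (under "#if 0", not part of
 * trace.c): driving the snapshot file handled by tracing_snapshot_write()
 * above.  Writing "1" allocates the max buffer if needed and swaps it with
 * the live buffer, "0" frees it, and any other value just clears the
 * snapshot buffer.  The debugfs mount point is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void snapshot_now(void)
{
        int fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);

        if (fd < 0)
                return;
        write(fd, "1", 1);      /* swap live buffer and snapshot buffer */
        close(fd);
        /* The captured trace can now be read back from the same file. */
}
#endif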
5168
5169static int tracing_snapshot_release(struct inode *inode, struct file *file)
5170{
5171        struct seq_file *m = file->private_data;
5172        int ret;
5173
5174        ret = tracing_release(inode, file);
5175
5176        if (file->f_mode & FMODE_READ)
5177                return ret;
5178
5179        /* If write only, the seq_file is just a stub */
5180        if (m)
5181                kfree(m->private);
5182        kfree(m);
5183
5184        return 0;
5185}
5186
5187static int tracing_buffers_open(struct inode *inode, struct file *filp);
5188static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5189                                    size_t count, loff_t *ppos);
5190static int tracing_buffers_release(struct inode *inode, struct file *file);
5191static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5192                   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5193
5194static int snapshot_raw_open(struct inode *inode, struct file *filp)
5195{
5196        struct ftrace_buffer_info *info;
5197        int ret;
5198
5199        ret = tracing_buffers_open(inode, filp);
5200        if (ret < 0)
5201                return ret;
5202
5203        info = filp->private_data;
5204
5205        if (info->iter.trace->use_max_tr) {
5206                tracing_buffers_release(inode, filp);
5207                return -EBUSY;
5208        }
5209
5210        info->iter.snapshot = true;
5211        info->iter.trace_buffer = &info->iter.tr->max_buffer;
5212
5213        return ret;
5214}
5215
5216#endif /* CONFIG_TRACER_SNAPSHOT */
5217
5218
5219static const struct file_operations tracing_thresh_fops = {
5220        .open           = tracing_open_generic,
5221        .read           = tracing_thresh_read,
5222        .write          = tracing_thresh_write,
5223        .llseek         = generic_file_llseek,
5224};
5225
5226static const struct file_operations tracing_max_lat_fops = {
5227        .open           = tracing_open_generic,
5228        .read           = tracing_max_lat_read,
5229        .write          = tracing_max_lat_write,
5230        .llseek         = generic_file_llseek,
5231};
5232
5233static const struct file_operations set_tracer_fops = {
5234        .open           = tracing_open_generic,
5235        .read           = tracing_set_trace_read,
5236        .write          = tracing_set_trace_write,
5237        .llseek         = generic_file_llseek,
5238};
5239
5240static const struct file_operations tracing_pipe_fops = {
5241        .open           = tracing_open_pipe,
5242        .poll           = tracing_poll_pipe,
5243        .read           = tracing_read_pipe,
5244        .splice_read    = tracing_splice_read_pipe,
5245        .release        = tracing_release_pipe,
5246        .llseek         = no_llseek,
5247};
5248
5249static const struct file_operations tracing_entries_fops = {
5250        .open           = tracing_open_generic_tr,
5251        .read           = tracing_entries_read,
5252        .write          = tracing_entries_write,
5253        .llseek         = generic_file_llseek,
5254        .release        = tracing_release_generic_tr,
5255};
5256
5257static const struct file_operations tracing_total_entries_fops = {
5258        .open           = tracing_open_generic_tr,
5259        .read           = tracing_total_entries_read,
5260        .llseek         = generic_file_llseek,
5261        .release        = tracing_release_generic_tr,
5262};
5263
5264static const struct file_operations tracing_free_buffer_fops = {
5265        .open           = tracing_open_generic_tr,
5266        .write          = tracing_free_buffer_write,
5267        .release        = tracing_free_buffer_release,
5268};
5269
5270static const struct file_operations tracing_mark_fops = {
5271        .open           = tracing_open_generic_tr,
5272        .write          = tracing_mark_write,
5273        .llseek         = generic_file_llseek,
5274        .release        = tracing_release_generic_tr,
5275};
5276
5277static const struct file_operations trace_clock_fops = {
5278        .open           = tracing_clock_open,
5279        .read           = seq_read,
5280        .llseek         = seq_lseek,
5281        .release        = tracing_single_release_tr,
5282        .write          = tracing_clock_write,
5283};
5284
5285#ifdef CONFIG_TRACER_SNAPSHOT
5286static const struct file_operations snapshot_fops = {
5287        .open           = tracing_snapshot_open,
5288        .read           = seq_read,
5289        .write          = tracing_snapshot_write,
5290        .llseek         = tracing_lseek,
5291        .release        = tracing_snapshot_release,
5292};
5293
5294static const struct file_operations snapshot_raw_fops = {
5295        .open           = snapshot_raw_open,
5296        .read           = tracing_buffers_read,
5297        .release        = tracing_buffers_release,
5298        .splice_read    = tracing_buffers_splice_read,
5299        .llseek         = no_llseek,
5300};
5301
5302#endif /* CONFIG_TRACER_SNAPSHOT */
5303
5304static int tracing_buffers_open(struct inode *inode, struct file *filp)
5305{
5306        struct trace_array *tr = inode->i_private;
5307        struct ftrace_buffer_info *info;
5308        int ret;
5309
5310        if (tracing_disabled)
5311                return -ENODEV;
5312
5313        if (trace_array_get(tr) < 0)
5314                return -ENODEV;
5315
5316        info = kzalloc(sizeof(*info), GFP_KERNEL);
5317        if (!info) {
5318                trace_array_put(tr);
5319                return -ENOMEM;
5320        }
5321
5322        mutex_lock(&trace_types_lock);
5323
5324        info->iter.tr           = tr;
5325        info->iter.cpu_file     = tracing_get_cpu(inode);
5326        info->iter.trace        = tr->current_trace;
5327        info->iter.trace_buffer = &tr->trace_buffer;
5328        info->spare             = NULL;
5329        /* Force reading ring buffer for first read */
5330        info->read              = (unsigned int)-1;
5331
5332        filp->private_data = info;
5333
5334        mutex_unlock(&trace_types_lock);
5335
5336        ret = nonseekable_open(inode, filp);
5337        if (ret < 0)
5338                trace_array_put(tr);
5339
5340        return ret;
5341}
5342
5343static unsigned int
5344tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5345{
5346        struct ftrace_buffer_info *info = filp->private_data;
5347        struct trace_iterator *iter = &info->iter;
5348
5349        return trace_poll(iter, filp, poll_table);
5350}
5351
5352static ssize_t
5353tracing_buffers_read(struct file *filp, char __user *ubuf,
5354                     size_t count, loff_t *ppos)
5355{
5356        struct ftrace_buffer_info *info = filp->private_data;
5357        struct trace_iterator *iter = &info->iter;
5358        ssize_t ret;
5359        ssize_t size;
5360
5361        if (!count)
5362                return 0;
5363
5364        mutex_lock(&trace_types_lock);
5365
5366#ifdef CONFIG_TRACER_MAX_TRACE
5367        if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5368                size = -EBUSY;
5369                goto out_unlock;
5370        }
5371#endif
5372
5373        if (!info->spare)
5374                info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5375                                                          iter->cpu_file);
5376        size = -ENOMEM;
5377        if (!info->spare)
5378                goto out_unlock;
5379
5380        /* Do we have previous read data to read? */
5381        if (info->read < PAGE_SIZE)
5382                goto read;
5383
5384 again:
5385        trace_access_lock(iter->cpu_file);
5386        ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5387                                    &info->spare,
5388                                    count,
5389                                    iter->cpu_file, 0);
5390        trace_access_unlock(iter->cpu_file);
5391
5392        if (ret < 0) {
5393                if (trace_empty(iter)) {
5394                        if ((filp->f_flags & O_NONBLOCK)) {
5395                                size = -EAGAIN;
5396                                goto out_unlock;
5397                        }
5398                        mutex_unlock(&trace_types_lock);
5399                        ret = wait_on_pipe(iter, false);
5400                        mutex_lock(&trace_types_lock);
5401                        if (ret) {
5402                                size = ret;
5403                                goto out_unlock;
5404                        }
5405                        goto again;
5406                }
5407                size = 0;
5408                goto out_unlock;
5409        }
5410
5411        info->read = 0;
5412 read:
5413        size = PAGE_SIZE - info->read;
5414        if (size > count)
5415                size = count;
5416
5417        ret = copy_to_user(ubuf, info->spare + info->read, size);
5418        if (ret == size) {
5419                size = -EFAULT;
5420                goto out_unlock;
5421        }
5422        size -= ret;
5423
5424        *ppos += size;
5425        info->read += size;
5426
5427 out_unlock:
5428        mutex_unlock(&trace_types_lock);
5429
5430        return size;
5431}
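
/*
 * Hedged kernel-side sketch (under "#if 0", not part of trace.c): the
 * spare-page pattern used by tracing_buffers_read() above, reading whole
 * pages from one CPU of a ring buffer with the same three calls the read
 * path uses.  drain_cpu_pages() is a hypothetical helper, shown only to
 * make the allocate/read/free pairing explicit.
 */
#if 0
static void drain_cpu_pages(struct ring_buffer *buffer, int cpu)
{
        void *spare = ring_buffer_alloc_read_page(buffer, cpu);

        if (!spare)
                return;

        /* full == 0: accept partially filled pages, as the read() path does */
        while (ring_buffer_read_page(buffer, &spare, PAGE_SIZE, cpu, 0) >= 0) {
                /* spare now holds one page of raw ring-buffer data */
        }

        ring_buffer_free_read_page(buffer, spare);
}
#endif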
5432
5433static int tracing_buffers_release(struct inode *inode, struct file *file)
5434{
5435        struct ftrace_buffer_info *info = file->private_data;
5436        struct trace_iterator *iter = &info->iter;
5437
5438        mutex_lock(&trace_types_lock);
5439
5440        __trace_array_put(iter->tr);
5441
5442        if (info->spare)
5443                ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5444        kfree(info);
5445
5446        mutex_unlock(&trace_types_lock);
5447
5448        return 0;
5449}
5450
5451struct buffer_ref {
5452        struct ring_buffer      *buffer;
5453        void                    *page;
5454        int                     ref;
5455};
5456
5457static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5458                                    struct pipe_buffer *buf)
5459{
5460        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5461
5462        if (--ref->ref)
5463                return;
5464
5465        ring_buffer_free_read_page(ref->buffer, ref->page);
5466        kfree(ref);
5467        buf->private = 0;
5468}
5469
5470static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5471                                struct pipe_buffer *buf)
5472{
5473        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5474
5475        ref->ref++;
5476}
5477
5478/* Pipe buffer operations for pages read out of the trace ring buffer. */
5479static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5480        .can_merge              = 0,
5481        .confirm                = generic_pipe_buf_confirm,
5482        .release                = buffer_pipe_buf_release,
5483        .steal                  = generic_pipe_buf_steal,
5484        .get                    = buffer_pipe_buf_get,
5485};
5486
5487/*
5488 * Callback from splice_to_pipe(): release any pages left in the spd
5489 * if we errored out while filling the pipe.
5490 */
5491static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5492{
5493        struct buffer_ref *ref =
5494                (struct buffer_ref *)spd->partial[i].private;
5495
5496        if (--ref->ref)
5497                return;
5498
5499        ring_buffer_free_read_page(ref->buffer, ref->page);
5500        kfree(ref);
5501        spd->partial[i].private = 0;
5502}
5503
5504static ssize_t
5505tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5506                            struct pipe_inode_info *pipe, size_t len,
5507                            unsigned int flags)
5508{
5509        struct ftrace_buffer_info *info = file->private_data;
5510        struct trace_iterator *iter = &info->iter;
5511        struct partial_page partial_def[PIPE_DEF_BUFFERS];
5512        struct page *pages_def[PIPE_DEF_BUFFERS];
5513        struct splice_pipe_desc spd = {
5514                .pages          = pages_def,
5515                .partial        = partial_def,
5516                .nr_pages_max   = PIPE_DEF_BUFFERS,
5517                .flags          = flags,
5518                .ops            = &buffer_pipe_buf_ops,
5519                .spd_release    = buffer_spd_release,
5520        };
5521        struct buffer_ref *ref;
5522        int entries, size, i;
5523        ssize_t ret = 0;
5524
5525        mutex_lock(&trace_types_lock);
5526
5527#ifdef CONFIG_TRACER_MAX_TRACE
5528        if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5529                ret = -EBUSY;
5530                goto out;
5531        }
5532#endif
5533
5534        if (splice_grow_spd(pipe, &spd)) {
5535                ret = -ENOMEM;
5536                goto out;
5537        }
5538
5539        if (*ppos & (PAGE_SIZE - 1)) {
5540                ret = -EINVAL;
5541                goto out;
5542        }
5543
5544        if (len & (PAGE_SIZE - 1)) {
5545                if (len < PAGE_SIZE) {
5546                        ret = -EINVAL;
5547                        goto out;
5548                }
5549                len &= PAGE_MASK;
5550        }
5551
5552 again:
5553        trace_access_lock(iter->cpu_file);
5554        entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5555
5556        for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5557                struct page *page;
5558                int r;
5559
5560                ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5561                if (!ref) {
5562                        ret = -ENOMEM;
5563                        break;
5564                }
5565
5566                ref->ref = 1;
5567                ref->buffer = iter->trace_buffer->buffer;
5568                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5569                if (!ref->page) {
5570                        ret = -ENOMEM;
5571                        kfree(ref);
5572                        break;
5573                }
5574
5575                r = ring_buffer_read_page(ref->buffer, &ref->page,
5576                                          len, iter->cpu_file, 1);
5577                if (r < 0) {
5578                        ring_buffer_free_read_page(ref->buffer, ref->page);
5579                        kfree(ref);
5580                        break;
5581                }
5582
5583                /*
5584                 * Zero out any leftover data, since this page is
5585                 * going to user space.
5586                 */
5587                size = ring_buffer_page_len(ref->page);
5588                if (size < PAGE_SIZE)
5589                        memset(ref->page + size, 0, PAGE_SIZE - size);
5590
5591                page = virt_to_page(ref->page);
5592
5593                spd.pages[i] = page;
5594                spd.partial[i].len = PAGE_SIZE;
5595                spd.partial[i].offset = 0;
5596                spd.partial[i].private = (unsigned long)ref;
5597                spd.nr_pages++;
5598                *ppos += PAGE_SIZE;
5599
5600                entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5601        }
5602
5603        trace_access_unlock(iter->cpu_file);
5604        spd.nr_pages = i;
5605
5606        /* did we read anything? */
5607        if (!spd.nr_pages) {
5608                if (ret)
5609                        goto out;
5610
5611                if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5612                        ret = -EAGAIN;
5613                        goto out;
5614                }
5615                mutex_unlock(&trace_types_lock);
5616                ret = wait_on_pipe(iter, true);
5617                mutex_lock(&trace_types_lock);
5618                if (ret)
5619                        goto out;
5620
5621                goto again;
5622        }
5623
5624        ret = splice_to_pipe(pipe, &spd);
5625        splice_shrink_spd(&spd);
5626out:
5627        mutex_unlock(&trace_types_lock);
5628
5629        return ret;
5630}
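
/*
 * Illustrative, standalone userspace sketch (under "#if 0", not part of
 * trace.c): consuming a per-CPU trace_pipe_raw file with splice(2), which
 * ends up in tracing_buffers_splice_read() above and hands whole
 * ring-buffer pages to the pipe without copying them through user space.
 * raw_fd is assumed to be an open per_cpu/cpuN/trace_pipe_raw descriptor,
 * out_fd any writable descriptor, and page_size sysconf(_SC_PAGESIZE).
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static void splice_trace_pages(int raw_fd, int out_fd, size_t page_size)
{
        int pfd[2];

        if (pipe(pfd) < 0)
                return;

        /* The kernel side requires page-aligned offsets and whole pages. */
        while (splice(raw_fd, NULL, pfd[1], NULL, page_size, SPLICE_F_MOVE) > 0)
                splice(pfd[0], NULL, out_fd, NULL, page_size, SPLICE_F_MOVE);

        close(pfd[0]);
        close(pfd[1]);
}
#endif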
5631
5632static const struct file_operations tracing_buffers_fops = {
5633        .open           = tracing_buffers_open,
5634        .read           = tracing_buffers_read,
5635        .poll           = tracing_buffers_poll,
5636        .release        = tracing_buffers_release,
5637        .splice_read    = tracing_buffers_splice_read,
5638        .llseek         = no_llseek,
5639};
5640
5641static ssize_t
5642tracing_stats_read(struct file *filp, char __user *ubuf,
5643                   size_t count, loff_t *ppos)
5644{
5645        struct inode *inode = file_inode(filp);
5646        struct trace_array *tr = inode->i_private;
5647        struct trace_buffer *trace_buf = &tr->trace_buffer;
5648        int cpu = tracing_get_cpu(inode);
5649        struct trace_seq *s;
5650        unsigned long cnt;
5651        unsigned long long t;
5652        unsigned long usec_rem;
5653
5654        s = kmalloc(sizeof(*s), GFP_KERNEL);
5655        if (!s)
5656                return -ENOMEM;
5657
5658        trace_seq_init(s);
5659
5660        cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5661        trace_seq_printf(s, "entries: %ld\n", cnt);
5662
5663        cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5664        trace_seq_printf(s, "overrun: %ld\n", cnt);
5665
5666        cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5667        trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5668
5669        cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5670        trace_seq_printf(s, "bytes: %ld\n", cnt);
5671
5672        if (trace_clocks[tr->clock_id].in_ns) {
5673                /* local or global for trace_clock */
5674                t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5675                usec_rem = do_div(t, USEC_PER_SEC);
5676                trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5677                                                                t, usec_rem);
5678
5679                t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5680                usec_rem = do_div(t, USEC_PER_SEC);
5681                trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5682        } else {
5683                /* counter or tsc mode for trace_clock */
5684                trace_seq_printf(s, "oldest event ts: %llu\n",
5685                                ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5686
5687                trace_seq_printf(s, "now ts: %llu\n",
5688                                ring_buffer_time_stamp(trace_buf->buffer, cpu));
5689        }
5690
5691        cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5692        trace_seq_printf(s, "dropped events: %ld\n", cnt);
5693
5694        cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5695        trace_seq_printf(s, "read events: %ld\n", cnt);
5696
5697        count = simple_read_from_buffer(ubuf, count, ppos,
5698                                        s->buffer, trace_seq_used(s));
5699
5700        kfree(s);
5701
5702        return count;
5703}
5704
5705static const struct file_operations tracing_stats_fops = {
5706        .open           = tracing_open_generic_tr,
5707        .read           = tracing_stats_read,
5708        .llseek         = generic_file_llseek,
5709        .release        = tracing_release_generic_tr,
5710};
5711
5712#ifdef CONFIG_DYNAMIC_FTRACE
5713
5714int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5715{
5716        return 0;
5717}
5718
5719static ssize_t
5720tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5721                  size_t cnt, loff_t *ppos)
5722{
5723        static char ftrace_dyn_info_buffer[1024];
5724        static DEFINE_MUTEX(dyn_info_mutex);
5725        unsigned long *p = filp->private_data;
5726        char *buf = ftrace_dyn_info_buffer;
5727        int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5728        int r;
5729
5730        mutex_lock(&dyn_info_mutex);
5731        r = sprintf(buf, "%ld ", *p);
5732
5733        r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5734        buf[r++] = '\n';
5735
5736        r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5737
5738        mutex_unlock(&dyn_info_mutex);
5739
5740        return r;
5741}
5742
5743static const struct file_operations tracing_dyn_info_fops = {
5744        .open           = tracing_open_generic,
5745        .read           = tracing_read_dyn_info,
5746        .llseek         = generic_file_llseek,
5747};
5748#endif /* CONFIG_DYNAMIC_FTRACE */
5749
5750#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5751static void
5752ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5753{
5754        tracing_snapshot();
5755}
5756
5757static void
5758ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5759{
5760        unsigned long *count = (unsigned long *)data;
5761
5762        if (!*count)
5763                return;
5764
5765        if (*count != -1)
5766                (*count)--;
5767
5768        tracing_snapshot();
5769}
5770
5771static int
5772ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5773                      struct ftrace_probe_ops *ops, void *data)
5774{
5775        long count = (long)data;
5776
5777        seq_printf(m, "%ps:", (void *)ip);
5778
5779        seq_puts(m, "snapshot");
5780
5781        if (count == -1)
5782                seq_puts(m, ":unlimited\n");
5783        else
5784                seq_printf(m, ":count=%ld\n", count);
5785
5786        return 0;
5787}
5788
5789static struct ftrace_probe_ops snapshot_probe_ops = {
5790        .func                   = ftrace_snapshot,
5791        .print                  = ftrace_snapshot_print,
5792};
5793
5794static struct ftrace_probe_ops snapshot_count_probe_ops = {
5795        .func                   = ftrace_count_snapshot,
5796        .print                  = ftrace_snapshot_print,
5797};
5798
5799static int
5800ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5801                               char *glob, char *cmd, char *param, int enable)
5802{
5803        struct ftrace_probe_ops *ops;
5804        void *count = (void *)-1;
5805        char *number;
5806        int ret;
5807
5808        /* hash funcs only work with set_ftrace_filter */
5809        if (!enable)
5810                return -EINVAL;
5811
5812        ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5813
5814        if (glob[0] == '!') {
5815                unregister_ftrace_function_probe_func(glob+1, ops);
5816                return 0;
5817        }
5818
5819        if (!param)
5820                goto out_reg;
5821
5822        number = strsep(&param, ":");
5823
5824        if (!strlen(number))
5825                goto out_reg;
5826
5827        /*
5828         * We use the callback data field (which is a pointer)
5829         * as our counter.
5830         */
5831        ret = kstrtoul(number, 0, (unsigned long *)&count);
5832        if (ret)
5833                return ret;
5834
5835 out_reg:
5836        ret = register_ftrace_function_probe(glob, ops, count);
5837
5838        if (ret >= 0)
5839                alloc_snapshot(&global_trace);
5840
5841        return ret < 0 ? ret : 0;
5842}
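
/*
 * Illustrative, standalone userspace sketch (under "#if 0", not part of
 * trace.c): arming the "snapshot" function command parsed by
 * ftrace_trace_snapshot_callback() above by writing to set_ftrace_filter.
 * "kfree" is only an example function name, the ":5" count is optional,
 * and prefixing the line with '!' removes the probe again.  The debugfs
 * mount point is an assumption.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void arm_snapshot_probe(void)
{
        static const char cmd[] = "kfree:snapshot:5";
        int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_WRONLY);

        if (fd < 0)
                return;
        /* Take a snapshot the first five times the function is hit. */
        write(fd, cmd, strlen(cmd));
        close(fd);
}
#endif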
5843
5844static struct ftrace_func_command ftrace_snapshot_cmd = {
5845        .name                   = "snapshot",
5846        .func                   = ftrace_trace_snapshot_callback,
5847};
5848
5849static __init int register_snapshot_cmd(void)
5850{
5851        return register_ftrace_command(&ftrace_snapshot_cmd);
5852}
5853#else
5854static inline __init int register_snapshot_cmd(void) { return 0; }
5855#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5856
5857struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5858{
5859        if (tr->dir)
5860                return tr->dir;
5861
5862        if (!debugfs_initialized())
5863                return NULL;
5864
5865        if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5866                tr->dir = debugfs_create_dir("tracing", NULL);
5867
5868        if (!tr->dir)
5869                pr_warn_once("Could not create debugfs directory 'tracing'\n");
5870
5871        return tr->dir;
5872}
5873
5874struct dentry *tracing_init_dentry(void)
5875{
5876        return tracing_init_dentry_tr(&global_trace);
5877}
5878
5879static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5880{
5881        struct dentry *d_tracer;
5882
5883        if (tr->percpu_dir)
5884                return tr->percpu_dir;
5885
5886        d_tracer = tracing_init_dentry_tr(tr);
5887        if (!d_tracer)
5888                return NULL;
5889
5890        tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5891
5892        WARN_ONCE(!tr->percpu_dir,
5893                  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5894
5895        return tr->percpu_dir;
5896}
5897
5898static struct dentry *
5899trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5900                      void *data, long cpu, const struct file_operations *fops)
5901{
5902        struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5903
5904        if (ret) /* See tracing_get_cpu() */
5905                ret->d_inode->i_cdev = (void *)(cpu + 1);
5906        return ret;
5907}
5908
5909static void
5910tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5911{
5912        struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5913        struct dentry *d_cpu;
5914        char cpu_dir[30]; /* 30 characters should be more than enough */
5915
5916        if (!d_percpu)
5917                return;
5918
5919        snprintf(cpu_dir, 30, "cpu%ld", cpu);
5920        d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5921        if (!d_cpu) {
5922                pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5923                return;
5924        }
5925
5926        /* per cpu trace_pipe */
5927        trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5928                                tr, cpu, &tracing_pipe_fops);
5929
5930        /* per cpu trace */
5931        trace_create_cpu_file("trace", 0644, d_cpu,
5932                                tr, cpu, &tracing_fops);
5933
5934        trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5935                                tr, cpu, &tracing_buffers_fops);
5936
5937        trace_create_cpu_file("stats", 0444, d_cpu,
5938                                tr, cpu, &tracing_stats_fops);
5939
5940        trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5941                                tr, cpu, &tracing_entries_fops);
5942
5943#ifdef CONFIG_TRACER_SNAPSHOT
5944        trace_create_cpu_file("snapshot", 0644, d_cpu,
5945                                tr, cpu, &snapshot_fops);
5946
5947        trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5948                                tr, cpu, &snapshot_raw_fops);
5949#endif
5950}
5951
5952#ifdef CONFIG_FTRACE_SELFTEST
5953/* Let selftest have access to static functions in this file */
5954#include "trace_selftest.c"
5955#endif
5956
5957struct trace_option_dentry {
5958        struct tracer_opt               *opt;
5959        struct tracer_flags             *flags;
5960        struct trace_array              *tr;
5961        struct dentry                   *entry;
5962};
5963
5964static ssize_t
5965trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5966                        loff_t *ppos)
5967{
5968        struct trace_option_dentry *topt = filp->private_data;
5969        char *buf;
5970
5971        if (topt->flags->val & topt->opt->bit)
5972                buf = "1\n";
5973        else
5974                buf = "0\n";
5975
5976        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5977}
5978
5979static ssize_t
5980trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5981                         loff_t *ppos)
5982{
5983        struct trace_option_dentry *topt = filp->private_data;
5984        unsigned long val;
5985        int ret;
5986
5987        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5988        if (ret)
5989                return ret;
5990
5991        if (val != 0 && val != 1)
5992                return -EINVAL;
5993
5994        if (!!(topt->flags->val & topt->opt->bit) != val) {
5995                mutex_lock(&trace_types_lock);
5996                ret = __set_tracer_option(topt->tr, topt->flags,
5997                                          topt->opt, !val);
5998                mutex_unlock(&trace_types_lock);
5999                if (ret)
6000                        return ret;
6001        }
6002
6003        *ppos += cnt;
6004
6005        return cnt;
6006}
6007
6008
6009static const struct file_operations trace_options_fops = {
6010        .open = tracing_open_generic,
6011        .read = trace_options_read,
6012        .write = trace_options_write,
6013        .llseek = generic_file_llseek,
6014};
6015
6016static ssize_t
6017trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6018                        loff_t *ppos)
6019{
6020        long index = (long)filp->private_data;
6021        char *buf;
6022
6023        if (trace_flags & (1 << index))
6024                buf = "1\n";
6025        else
6026                buf = "0\n";
6027
6028        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6029}
6030
6031static ssize_t
6032trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6033                         loff_t *ppos)
6034{
6035        struct trace_array *tr = &global_trace;
6036        long index = (long)filp->private_data;
6037        unsigned long val;
6038        int ret;
6039
6040        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6041        if (ret)
6042                return ret;
6043
6044        if (val != 0 && val != 1)
6045                return -EINVAL;
6046
6047        mutex_lock(&trace_types_lock);
6048        ret = set_tracer_flag(tr, 1 << index, val);
6049        mutex_unlock(&trace_types_lock);
6050
6051        if (ret < 0)
6052                return ret;
6053
6054        *ppos += cnt;
6055
6056        return cnt;
6057}
6058
6059static const struct file_operations trace_options_core_fops = {
6060        .open = tracing_open_generic,
6061        .read = trace_options_core_read,
6062        .write = trace_options_core_write,
6063        .llseek = generic_file_llseek,
6064};
6065
6066struct dentry *trace_create_file(const char *name,
6067                                 umode_t mode,
6068                                 struct dentry *parent,
6069                                 void *data,
6070                                 const struct file_operations *fops)
6071{
6072        struct dentry *ret;
6073
6074        ret = debugfs_create_file(name, mode, parent, data, fops);
6075        if (!ret)
6076                pr_warning("Could not create debugfs '%s' entry\n", name);
6077
6078        return ret;
6079}
6080
6081
6082static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6083{
6084        struct dentry *d_tracer;
6085
6086        if (tr->options)
6087                return tr->options;
6088
6089        d_tracer = tracing_init_dentry_tr(tr);
6090        if (!d_tracer)
6091                return NULL;
6092
6093        tr->options = debugfs_create_dir("options", d_tracer);
6094        if (!tr->options) {
6095                pr_warning("Could not create debugfs directory 'options'\n");
6096                return NULL;
6097        }
6098
6099        return tr->options;
6100}
6101
6102static void
6103create_trace_option_file(struct trace_array *tr,
6104                         struct trace_option_dentry *topt,
6105                         struct tracer_flags *flags,
6106                         struct tracer_opt *opt)
6107{
6108        struct dentry *t_options;
6109
6110        t_options = trace_options_init_dentry(tr);
6111        if (!t_options)
6112                return;
6113
6114        topt->flags = flags;
6115        topt->opt = opt;
6116        topt->tr = tr;
6117
6118        topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6119                                    &trace_options_fops);
6120
6121}
6122
6123static struct trace_option_dentry *
6124create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6125{
6126        struct trace_option_dentry *topts;
6127        struct tracer_flags *flags;
6128        struct tracer_opt *opts;
6129        int cnt;
6130
6131        if (!tracer)
6132                return NULL;
6133
6134        flags = tracer->flags;
6135
6136        if (!flags || !flags->opts)
6137                return NULL;
6138
6139        opts = flags->opts;
6140
6141        for (cnt = 0; opts[cnt].name; cnt++)
6142                ;
6143
6144        topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6145        if (!topts)
6146                return NULL;
6147
6148        for (cnt = 0; opts[cnt].name; cnt++)
6149                create_trace_option_file(tr, &topts[cnt], flags,
6150                                         &opts[cnt]);
6151
6152        return topts;
6153}
6154
6155static void
6156destroy_trace_option_files(struct trace_option_dentry *topts)
6157{
6158        int cnt;
6159
6160        if (!topts)
6161                return;
6162
6163        for (cnt = 0; topts[cnt].opt; cnt++)
6164                debugfs_remove(topts[cnt].entry);
6165
6166        kfree(topts);
6167}
6168
6169static struct dentry *
6170create_trace_option_core_file(struct trace_array *tr,
6171                              const char *option, long index)
6172{
6173        struct dentry *t_options;
6174
6175        t_options = trace_options_init_dentry(tr);
6176        if (!t_options)
6177                return NULL;
6178
6179        return trace_create_file(option, 0644, t_options, (void *)index,
6180                                    &trace_options_core_fops);
6181}
6182
6183static __init void create_trace_options_dir(struct trace_array *tr)
6184{
6185        struct dentry *t_options;
6186        int i;
6187
6188        t_options = trace_options_init_dentry(tr);
6189        if (!t_options)
6190                return;
6191
6192        for (i = 0; trace_options[i]; i++)
6193                create_trace_option_core_file(tr, trace_options[i], i);
6194}
6195
6196static ssize_t
6197rb_simple_read(struct file *filp, char __user *ubuf,
6198               size_t cnt, loff_t *ppos)
6199{
6200        struct trace_array *tr = filp->private_data;
6201        char buf[64];
6202        int r;
6203
6204        r = tracer_tracing_is_on(tr);
6205        r = sprintf(buf, "%d\n", r);
6206
6207        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6208}
6209
6210static ssize_t
6211rb_simple_write(struct file *filp, const char __user *ubuf,
6212                size_t cnt, loff_t *ppos)
6213{
6214        struct trace_array *tr = filp->private_data;
6215        struct ring_buffer *buffer = tr->trace_buffer.buffer;
6216        unsigned long val;
6217        int ret;
6218
6219        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6220        if (ret)
6221                return ret;
6222
6223        if (buffer) {
6224                mutex_lock(&trace_types_lock);
6225                if (val) {
6226                        tracer_tracing_on(tr);
6227                        if (tr->current_trace->start)
6228                                tr->current_trace->start(tr);
6229                } else {
6230                        tracer_tracing_off(tr);
6231                        if (tr->current_trace->stop)
6232                                tr->current_trace->stop(tr);
6233                }
6234                mutex_unlock(&trace_types_lock);
6235        }
6236
6237        (*ppos)++;
6238
6239        return cnt;
6240}
6241
6242static const struct file_operations rb_simple_fops = {
6243        .open           = tracing_open_generic_tr,
6244        .read           = rb_simple_read,
6245        .write          = rb_simple_write,
6246        .release        = tracing_release_generic_tr,
6247        .llseek         = default_llseek,
6248};
6249
6250struct dentry *trace_instance_dir;
6251
6252static void
6253init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6254
6255static int
6256allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6257{
6258        enum ring_buffer_flags rb_flags;
6259
6260        rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6261
6262        buf->tr = tr;
6263
6264        buf->buffer = ring_buffer_alloc(size, rb_flags);
6265        if (!buf->buffer)
6266                return -ENOMEM;
6267
6268        buf->data = alloc_percpu(struct trace_array_cpu);
6269        if (!buf->data) {
6270                ring_buffer_free(buf->buffer);
6271                return -ENOMEM;
6272        }
6273
6274        /* Allocate the first page for all buffers */
6275        set_buffer_entries(&tr->trace_buffer,
6276                           ring_buffer_size(tr->trace_buffer.buffer, 0));
6277
6278        return 0;
6279}
6280
6281static int allocate_trace_buffers(struct trace_array *tr, int size)
6282{
6283        int ret;
6284
6285        ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6286        if (ret)
6287                return ret;
6288
6289#ifdef CONFIG_TRACER_MAX_TRACE
6290        ret = allocate_trace_buffer(tr, &tr->max_buffer,
6291                                    allocate_snapshot ? size : 1);
6292        if (WARN_ON(ret)) {
6293                ring_buffer_free(tr->trace_buffer.buffer);
6294                free_percpu(tr->trace_buffer.data);
6295                return -ENOMEM;
6296        }
6297        tr->allocated_snapshot = allocate_snapshot;
6298
6299        /*
6300         * Only the top level trace array gets its snapshot allocated
6301         * from the kernel command line.
6302         */
6303        allocate_snapshot = false;
6304#endif
6305        return 0;
6306}
6307
6308static void free_trace_buffer(struct trace_buffer *buf)
6309{
6310        if (buf->buffer) {
6311                ring_buffer_free(buf->buffer);
6312                buf->buffer = NULL;
6313                free_percpu(buf->data);
6314                buf->data = NULL;
6315        }
6316}
6317
6318static void free_trace_buffers(struct trace_array *tr)
6319{
6320        if (!tr)
6321                return;
6322
6323        free_trace_buffer(&tr->trace_buffer);
6324
6325#ifdef CONFIG_TRACER_MAX_TRACE
6326        free_trace_buffer(&tr->max_buffer);
6327#endif
6328}
6329
6330static int new_instance_create(const char *name)
6331{
6332        struct trace_array *tr;
6333        int ret;
6334
6335        mutex_lock(&trace_types_lock);
6336
6337        ret = -EEXIST;
6338        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6339                if (tr->name && strcmp(tr->name, name) == 0)
6340                        goto out_unlock;
6341        }
6342
6343        ret = -ENOMEM;
6344        tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6345        if (!tr)
6346                goto out_unlock;
6347
6348        tr->name = kstrdup(name, GFP_KERNEL);
6349        if (!tr->name)
6350                goto out_free_tr;
6351
6352        if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6353                goto out_free_tr;
6354
6355        cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6356
6357        raw_spin_lock_init(&tr->start_lock);
6358
6359        tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6360
6361        tr->current_trace = &nop_trace;
6362
6363        INIT_LIST_HEAD(&tr->systems);
6364        INIT_LIST_HEAD(&tr->events);
6365
6366        if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6367                goto out_free_tr;
6368
6369        tr->dir = debugfs_create_dir(name, trace_instance_dir);
6370        if (!tr->dir)
6371                goto out_free_tr;
6372
6373        ret = event_trace_add_tracer(tr->dir, tr);
6374        if (ret) {
6375                debugfs_remove_recursive(tr->dir);
6376                goto out_free_tr;
6377        }
6378
6379        init_tracer_debugfs(tr, tr->dir);
6380
6381        list_add(&tr->list, &ftrace_trace_arrays);
6382
6383        mutex_unlock(&trace_types_lock);
6384
6385        return 0;
6386
6387 out_free_tr:
6388        free_trace_buffers(tr);
6389        free_cpumask_var(tr->tracing_cpumask);
6390        kfree(tr->name);
6391        kfree(tr);
6392
6393 out_unlock:
6394        mutex_unlock(&trace_types_lock);
6395
6396        return ret;
6397
6398}
6399
6400static int instance_delete(const char *name)
6401{
6402        struct trace_array *tr;
6403        int found = 0;
6404        int ret;
6405
6406        mutex_lock(&trace_types_lock);
6407
6408        ret = -ENODEV;
6409        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6410                if (tr->name && strcmp(tr->name, name) == 0) {
6411                        found = 1;
6412                        break;
6413                }
6414        }
6415        if (!found)
6416                goto out_unlock;
6417
6418        ret = -EBUSY;
6419        if (tr->ref)
6420                goto out_unlock;
6421
6422        list_del(&tr->list);
6423
6424        tracing_set_nop(tr);
6425        event_trace_del_tracer(tr);
6426        ftrace_destroy_function_files(tr);
6427        debugfs_remove_recursive(tr->dir);
6428        free_trace_buffers(tr);
6429
6430        kfree(tr->name);
6431        kfree(tr);
6432
6433        ret = 0;
6434
6435 out_unlock:
6436        mutex_unlock(&trace_types_lock);
6437
6438        return ret;
6439}
6440
6441static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
6442{
6443        struct dentry *parent;
6444        int ret;
6445
6446        /* Paranoid: Make sure the parent is the "instances" directory */
6447        parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
6448        if (WARN_ON_ONCE(parent != trace_instance_dir))
6449                return -ENOENT;
6450
6451        /*
6452         * The inode mutex is locked, but debugfs_create_dir() will also
6453         * take the mutex. As the instances directory cannot be destroyed
6454         * or changed in any other way, it is safe to unlock it and
6455         * let the dentry try. If two users try to make the same directory
6456         * at the same time, new_instance_create() will determine the
6457         * winner.
6458         */
6459        mutex_unlock(&inode->i_mutex);
6460
6461        ret = new_instance_create(dentry->d_iname);
6462
6463        mutex_lock(&inode->i_mutex);
6464
6465        return ret;
6466}
6467
6468static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6469{
6470        struct dentry *parent;
6471        int ret;
6472
6473        /* Paranoid: Make sure the parent is the "instances" directory */
6474        parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
6475        if (WARN_ON_ONCE(parent != trace_instance_dir))
6476                return -ENOENT;
6477
6478        /* The caller did a dget() on dentry */
6479        mutex_unlock(&dentry->d_inode->i_mutex);
6480
6481        /*
6482         * The inode mutex is locked, but debugfs_remove_recursive() will
6483         * also take the mutex. As the instances directory cannot be
6484         * destroyed or changed in any other way, it is safe to unlock it
6485         * and let the dentry try. If two users try to remove the same
6486         * instance at the same time, instance_delete() will determine
6487         * the winner.
6488         */
6489        mutex_unlock(&inode->i_mutex);
6490
6491        ret = instance_delete(dentry->d_iname);
6492
6493        mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6494        mutex_lock(&dentry->d_inode->i_mutex);
6495
6496        return ret;
6497}
6498
6499static const struct inode_operations instance_dir_inode_operations = {
6500        .lookup         = simple_lookup,
6501        .mkdir          = instance_mkdir,
6502        .rmdir          = instance_rmdir,
6503};
6504
6505static __init void create_trace_instances(struct dentry *d_tracer)
6506{
6507        trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6508        if (WARN_ON(!trace_instance_dir))
6509                return;
6510
6511        /* Hijack the dir inode operations, to allow mkdir */
6512        trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6513}
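
/*
 * Illustrative, standalone userspace sketch (under "#if 0", not part of
 * trace.c): because create_trace_instances() above replaces the directory's
 * inode operations, a plain mkdir(2)/rmdir(2) inside "instances" is routed
 * to instance_mkdir()/instance_rmdir() and creates or destroys a whole
 * trace instance with its own buffers.  "foo" is an arbitrary name and the
 * debugfs mount point is an assumption; rmdir() fails with EBUSY while the
 * instance is still referenced.
 */
#if 0
#include <sys/stat.h>
#include <unistd.h>

static int make_and_remove_instance(void)
{
        const char *dir = "/sys/kernel/debug/tracing/instances/foo";

        if (mkdir(dir, 0755) < 0)
                return -1;
        /* ...use instances/foo/trace, instances/foo/trace_pipe, etc... */
        return rmdir(dir);
}
#endif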
6514
6515static void
6516init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6517{
6518        int cpu;
6519
6520        trace_create_file("available_tracers", 0444, d_tracer,
6521                        tr, &show_traces_fops);
6522
6523        trace_create_file("current_tracer", 0644, d_tracer,
6524                        tr, &set_tracer_fops);
6525
6526        trace_create_file("tracing_cpumask", 0644, d_tracer,
6527                          tr, &tracing_cpumask_fops);
6528
6529        trace_create_file("trace_options", 0644, d_tracer,
6530                          tr, &tracing_iter_fops);
6531
6532        trace_create_file("trace", 0644, d_tracer,
6533                          tr, &tracing_fops);
6534
6535        trace_create_file("trace_pipe", 0444, d_tracer,
6536                          tr, &tracing_pipe_fops);
6537
6538        trace_create_file("buffer_size_kb", 0644, d_tracer,
6539                          tr, &tracing_entries_fops);
6540
6541        trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6542                          tr, &tracing_total_entries_fops);
6543
6544        trace_create_file("free_buffer", 0200, d_tracer,
6545                          tr, &tracing_free_buffer_fops);
6546
6547        trace_create_file("trace_marker", 0220, d_tracer,
6548                          tr, &tracing_mark_fops);
6549
6550        trace_create_file("trace_clock", 0644, d_tracer, tr,
6551                          &trace_clock_fops);
6552
6553        trace_create_file("tracing_on", 0644, d_tracer,
6554                          tr, &rb_simple_fops);
6555
6556#ifdef CONFIG_TRACER_MAX_TRACE
6557        trace_create_file("tracing_max_latency", 0644, d_tracer,
6558                        &tr->max_latency, &tracing_max_lat_fops);
6559#endif
6560
6561        if (ftrace_create_function_files(tr, d_tracer))
6562                WARN(1, "Could not allocate function filter files");
6563
6564#ifdef CONFIG_TRACER_SNAPSHOT
6565        trace_create_file("snapshot", 0644, d_tracer,
6566                          tr, &snapshot_fops);
6567#endif
6568
6569        for_each_tracing_cpu(cpu)
6570                tracing_init_debugfs_percpu(tr, cpu);
6571
6572}
6573
6574static __init int tracer_init_debugfs(void)
6575{
6576        struct dentry *d_tracer;
6577
6578        trace_access_lock_init();
6579
6580        d_tracer = tracing_init_dentry();
6581        if (!d_tracer)
6582                return 0;
6583
6584        init_tracer_debugfs(&global_trace, d_tracer);
6585
6586        trace_create_file("tracing_thresh", 0644, d_tracer,
6587                        &global_trace, &tracing_thresh_fops);
6588
6589        trace_create_file("README", 0444, d_tracer,
6590                        NULL, &tracing_readme_fops);
6591
6592        trace_create_file("saved_cmdlines", 0444, d_tracer,
6593                        NULL, &tracing_saved_cmdlines_fops);
6594
6595        trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6596                          NULL, &tracing_saved_cmdlines_size_fops);
6597
6598#ifdef CONFIG_DYNAMIC_FTRACE
6599        trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6600                        &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6601#endif
6602
6603        create_trace_instances(d_tracer);
6604
6605        create_trace_options_dir(&global_trace);
6606
6607        return 0;
6608}
6609
6610static int trace_panic_handler(struct notifier_block *this,
6611                               unsigned long event, void *unused)
6612{
6613        if (ftrace_dump_on_oops)
6614                ftrace_dump(ftrace_dump_on_oops);
6615        return NOTIFY_OK;
6616}
6617
6618static struct notifier_block trace_panic_notifier = {
6619        .notifier_call  = trace_panic_handler,
6620        .next           = NULL,
6621        .priority       = 150   /* priority: INT_MAX >= x >= 0 */
6622};
6623
6624static int trace_die_handler(struct notifier_block *self,
6625                             unsigned long val,
6626                             void *data)
6627{
6628        switch (val) {
6629        case DIE_OOPS:
6630                if (ftrace_dump_on_oops)
6631                        ftrace_dump(ftrace_dump_on_oops);
6632                break;
6633        default:
6634                break;
6635        }
6636        return NOTIFY_OK;
6637}
6638
6639static struct notifier_block trace_die_notifier = {
6640        .notifier_call = trace_die_handler,
6641        .priority = 200
6642};
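
/*
 * Both notifiers above are gated on ftrace_dump_on_oops. Illustrative
 * ways to arm them (exact option spelling may vary between kernel
 * versions):
 *
 *   ftrace_dump_on_oops              on the kernel command line
 *   ftrace_dump_on_oops=orig_cpu     dump only the CPU that oopsed
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */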
6643
6644/*
6645 * printk can handle at most 1024 characters, and we really don't
6646 * need it that big: nothing should be printing 1000 characters anyway.
6647 */
6648#define TRACE_MAX_PRINT         1000
6649
6650/*
6651 * Define here KERN_TRACE so that we have one place to modify
6652 * it if we decide to change what log level the ftrace dump
6653 * should be at.
6654 */
6655#define KERN_TRACE              KERN_EMERG
6656
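/*
 * Push the contents of a trace_seq out to the console and reset it,
 * one trace line at a time. Used by console dump paths such as
 * ftrace_dump() below.
 */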
6657void
6658trace_printk_seq(struct trace_seq *s)
6659{
6660        /* Probably should print a warning here. */
6661        if (s->seq.len >= TRACE_MAX_PRINT)
6662                s->seq.len = TRACE_MAX_PRINT;
6663
6664        /*
6665         * More paranoid code. Although the buffer size is set to
6666         * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6667         * an extra layer of protection.
6668         */
6669        if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6670                s->seq.len = s->seq.size - 1;
6671
6672        /* should be zero ended, but we are paranoid. */
6673        s->buffer[s->seq.len] = 0;
6674
6675        printk(KERN_TRACE "%s", s->buffer);
6676
6677        trace_seq_init(s);
6678}
6679
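/*
 * Hand-roll an iterator over the global trace buffer without going
 * through the normal file open path. Shared by ftrace_dump() and by
 * out-of-band dumpers (e.g. the kdb "ftdump" command).
 */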
6680void trace_init_global_iter(struct trace_iterator *iter)
6681{
6682        iter->tr = &global_trace;
6683        iter->trace = iter->tr->current_trace;
6684        iter->cpu_file = RING_BUFFER_ALL_CPUS;
6685        iter->trace_buffer = &global_trace.trace_buffer;
6686
6687        if (iter->trace && iter->trace->open)
6688                iter->trace->open(iter);
6689
6690        /* Annotate start of buffers if we had overruns */
6691        if (ring_buffer_overruns(iter->trace_buffer->buffer))
6692                iter->iter_flags |= TRACE_FILE_ANNOTATE;
6693
6694        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6695        if (trace_clocks[iter->tr->clock_id].in_ns)
6696                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6697}
6698
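/*
 * Dump the ring buffer(s) straight to the console. This is a last
 * resort path (panic/die notifiers, sysrq-z), so it avoids allocations
 * and sleeping, and serializes concurrent callers with the
 * dump_running counter.
 */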
6699void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6700{
6701        /* use static because iter can be a bit big for the stack */
6702        static struct trace_iterator iter;
6703        static atomic_t dump_running;
6704        unsigned int old_userobj;
6705        unsigned long flags;
6706        int cnt = 0, cpu;
6707
6708        /* Only allow one dump user at a time. */
6709        if (atomic_inc_return(&dump_running) != 1) {
6710                atomic_dec(&dump_running);
6711                return;
6712        }
6713
6714        /*
6715         * Always turn off tracing when we dump.
6716         * We don't need to show trace output of what happens
6717         * between multiple crashes.
6718         *
6719         * If the user does a sysrq-z, then they can re-enable
6720         * tracing with echo 1 > tracing_on.
6721         */
6722        tracing_off();
6723
6724        local_irq_save(flags);
6725
6726        /* Simulate the iterator */
6727        trace_init_global_iter(&iter);
6728
6729        for_each_tracing_cpu(cpu) {
6730                atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6731        }
6732
6733        old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6734
6735        /* don't look at user memory in panic mode */
6736        trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6737
6738        switch (oops_dump_mode) {
6739        case DUMP_ALL:
6740                iter.cpu_file = RING_BUFFER_ALL_CPUS;
6741                break;
6742        case DUMP_ORIG:
6743                iter.cpu_file = raw_smp_processor_id();
6744                break;
6745        case DUMP_NONE:
6746                goto out_enable;
6747        default:
6748                printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6749                iter.cpu_file = RING_BUFFER_ALL_CPUS;
6750        }
6751
6752        printk(KERN_TRACE "Dumping ftrace buffer:\n");
6753
6754        /* Did function tracer already get disabled? */
6755        if (ftrace_is_dead()) {
6756                printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6757                printk("#          MAY BE MISSING FUNCTION EVENTS\n");
6758        }
6759
6760        /*
6761         * We need to stop all tracing on all CPUs to read the
6762         * next buffer. This is a bit expensive, but is
6763         * not done often. We fill all that we can read,
6764         * and then release the locks again.
6765         */
6766
6767        while (!trace_empty(&iter)) {
6768
6769                if (!cnt)
6770                        printk(KERN_TRACE "---------------------------------\n");
6771
6772                cnt++;
6773
6774                /* reset all but tr, trace, and overruns */
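                /*
                 * The memset() starts at &iter.seq, so everything laid out
                 * before "seq" in struct trace_iterator (tr, trace, buffer
                 * pointers, ...) is preserved across loop iterations.
                 */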
6775                memset(&iter.seq, 0,
6776                       sizeof(struct trace_iterator) -
6777                       offsetof(struct trace_iterator, seq));
6778                iter.iter_flags |= TRACE_FILE_LAT_FMT;
6779                iter.pos = -1;
6780
6781                if (trace_find_next_entry_inc(&iter) != NULL) {
6782                        int ret;
6783
6784                        ret = print_trace_line(&iter);
6785                        if (ret != TRACE_TYPE_NO_CONSUME)
6786                                trace_consume(&iter);
6787                }
6788                touch_nmi_watchdog();
6789
6790                trace_printk_seq(&iter.seq);
6791        }
6792
6793        if (!cnt)
6794                printk(KERN_TRACE "   (ftrace buffer empty)\n");
6795        else
6796                printk(KERN_TRACE "---------------------------------\n");
6797
6798 out_enable:
6799        trace_flags |= old_userobj;
6800
6801        for_each_tracing_cpu(cpu) {
6802                atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6803        }
6804        atomic_dec(&dump_running);
6805        local_irq_restore(flags);
6806}
6807EXPORT_SYMBOL_GPL(ftrace_dump);
6808
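/*
 * Allocate the core tracing state: the cpumasks, the saved-cmdline
 * table, the per-CPU ring buffers and the nop tracer bootstrap.
 * Called from trace_init() during early boot, long before the
 * debugfs files above are created.
 */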
6809__init static int tracer_alloc_buffers(void)
6810{
6811        int ring_buf_size;
6812        int ret = -ENOMEM;
6813
6815        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6816                goto out;
6817
6818        if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6819                goto out_free_buffer_mask;
6820
6821        /* Only allocate trace_printk buffers if a trace_printk exists */
6822        if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6823                /* Must be called before global_trace.buffer is allocated */
6824                trace_printk_init_buffers();
6825
6826        /* To save memory, keep the ring buffer size to its minimum */
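        /*
         * The full, user visible size (trace_buf_size) is only allocated
         * once something actually starts using the buffer; until then the
         * ring buffer stays at its smallest allowed size.
         */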
6827        if (ring_buffer_expanded)
6828                ring_buf_size = trace_buf_size;
6829        else
6830                ring_buf_size = 1;
6831
6832        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6833        cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6834
6835        raw_spin_lock_init(&global_trace.start_lock);
6836
6837        /* Used for event triggers */
6838        temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6839        if (!temp_buffer)
6840                goto out_free_cpumask;
6841
6842        if (trace_create_savedcmd() < 0)
6843                goto out_free_temp_buffer;
6844
6845        /* TODO: make the number of buffers hot pluggable with CPUS */
6846        if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6847                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6848                WARN_ON(1);
6849                goto out_free_savedcmd;
6850        }
6851
6852        if (global_trace.buffer_disabled)
6853                tracing_off();
6854
6855        if (trace_boot_clock) {
6856                ret = tracing_set_clock(&global_trace, trace_boot_clock);
6857                if (ret < 0)
6858                        pr_warning("Trace clock %s not defined, going back to default\n",
6859                                   trace_boot_clock);
6860        }
6861
6862        /*
6863         * register_tracer() might reference current_trace, so it
6864         * needs to be set before we register anything. This is
6865         * just a bootstrap of current_trace anyway.
6866         */
6867        global_trace.current_trace = &nop_trace;
6868
6869        global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6870
6871        ftrace_init_global_array_ops(&global_trace);
6872
6873        register_tracer(&nop_trace);
6874
6875        /* All seems OK, enable tracing */
6876        tracing_disabled = 0;
6877
6878        atomic_notifier_chain_register(&panic_notifier_list,
6879                                       &trace_panic_notifier);
6880
6881        register_die_notifier(&trace_die_notifier);
6882
6883        global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6884
6885        INIT_LIST_HEAD(&global_trace.systems);
6886        INIT_LIST_HEAD(&global_trace.events);
6887        list_add(&global_trace.list, &ftrace_trace_arrays);
6888
6889        while (trace_boot_options) {
6890                char *option;
6891
6892                option = strsep(&trace_boot_options, ",");
6893                trace_set_options(&global_trace, option);
6894        }
6895
6896        register_snapshot_cmd();
6897
6898        return 0;
6899
6900out_free_savedcmd:
6901        free_saved_cmdlines_buffer(savedcmd);
6902out_free_temp_buffer:
6903        ring_buffer_free(temp_buffer);
6904out_free_cpumask:
6905        free_cpumask_var(global_trace.tracing_cpumask);
6906out_free_buffer_mask:
6907        free_cpumask_var(tracing_buffer_mask);
6908out:
6909        return ret;
6910}
6911
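/*
 * Early entry point for the tracing subsystem, called from
 * start_kernel(). Only the buffers and core event state are set up
 * here; the debugfs interface is created later via initcalls.
 */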
6912void __init trace_init(void)
6913{
6914        if (tracepoint_printk) {
6915                tracepoint_print_iter =
6916                        kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
6917                if (WARN_ON(!tracepoint_print_iter))
6918                        tracepoint_printk = 0;
6919        }
6920        tracer_alloc_buffers();
6921        trace_event_init();
6922}
6923
6924__init static int clear_boot_tracer(void)
6925{
6926        /*
6927         * The buffer holding the default boot-up tracer name lives
6928         * in an init section. This function runs as a late initcall:
6929         * if the boot tracer was never registered by then, clear the
6930         * pointer so that later registrations do not access the buffer
6931         * that is about to be freed.
6932         */
6933        if (!default_bootup_tracer)
6934                return 0;
6935
6936        printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6937               default_bootup_tracer);
6938        default_bootup_tracer = NULL;
6939
6940        return 0;
6941}
6942
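/*
 * Ordering matters here: tracer_init_debugfs() needs the debugfs
 * filesystem to be up (fs_initcall), while clear_boot_tracer() must
 * run after every built-in tracer has had a chance to register
 * (late_initcall).
 */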
6943fs_initcall(tracer_init_debugfs);
6944late_initcall(clear_boot_tracer);
6945