linux/kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

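/*
 * tracing_cpu is nonzero while this CPU is timing a critical section;
 * max_trace_lock serializes the check-and-update of the max latency
 * between CPUs.
 */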
static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;
static bool function_enabled;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH     1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* display latency trace as call graph */
        { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        .val  = 0,
        .opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(*flags))
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
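        /*
         * Bump the per-cpu disabled counter; unless we are the first
         * in (counter == 1), another context on this CPU is already
         * inside the tracer, so back out to avoid recursion.
         */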
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
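/*
 * Toggling the display-graph option switches the callbacks between the
 * plain function tracer and the function graph tracer, so the tracer
 * is stopped, per-cpu state and the max latency are reset, and it is
 * restarted with the other callback registered.
 */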
static int
irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        int cpu;

        if (!(bit & TRACE_DISPLAY_GRAPH))
                return -EINVAL;

        if (!(is_graph() ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph())
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph())
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        if (is_graph())
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph())
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int
irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 *
 * If a latency threshold is set, report any section longer than the
 * threshold; otherwise report only sections that beat the current
 * maximum.
 */
static int report_latency(struct trace_array *tr, cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tr->max_latency)
                        return 0;
        }
        return 1;
}

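/*
 * Called when the critical section ends: compute how long irqs (or
 * preemption) were disabled and, if the delta qualifies per
 * report_latency(), record the trace as the new maximum via
 * update_max_tr_single().
 */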
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

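/*
 * Arm the measurement on this CPU: stamp the start of the critical
 * section and set tracing_cpu so the function tracer callbacks only
 * trace within the section.
 */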
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

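/*
 * Disarm the measurement on this CPU and let check_critical_timing()
 * decide whether the section that just ended is a new maximum.
 */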
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/*
 * start and stop critical timings, used to exclude sections (such as
 * idle) from the measurement:
 */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
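/*
 * With lockdep (CONFIG_PROVE_LOCKING) enabled, lockdep provides the
 * trace_hardirqs_*() entry points and calls back into these
 * time_hardirqs_*() hooks; without it, this file defines the
 * trace_hardirqs_*() functions itself below.
 */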
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
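/*
 * Note the asymmetric guards here and in the hardirq hooks above: irq
 * events are only timed while preemption is still enabled, and preempt
 * events only while irqs are still enabled. With the combined
 * preemptirqsoff tracer both bits are set in trace_type, so the timed
 * section runs from the first disable (of either kind) to the last
 * enable.
 */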
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static void irqsoff_function_set(struct trace_array *tr, int set)
{
        if (set)
                register_irqsoff_function(tr, is_graph(), 1);
        else
                unregister_irqsoff_function(tr, is_graph());
}

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (mask & TRACE_ITER_FUNCTION)
                irqsoff_function_set(tr, set);

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

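/*
 * Only one trace instance can use these tracers at a time: the state
 * above (irqsoff_trace, tracer_enabled, trace_type, ...) is global,
 * not per trace_array.
 */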
static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = trace_flags;

        /* Non-overwrite mode breaks the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(&tr->trace_buffer);

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only the top-level instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph())))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph());

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
core_initcall(init_irqsoff_tracer);
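
/*
 * Example usage from user space, assuming tracefs/debugfs is mounted
 * at /sys/kernel/debug (paths are illustrative):
 *
 *   cd /sys/kernel/debug/tracing
 *   echo 0 > tracing_max_latency
 *   echo irqsoff > current_tracer
 *   ... run the workload ...
 *   cat tracing_max_latency        # worst latency seen, in us
 *   cat trace                      # snapshot of the max-latency trace
 */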