linux/kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
        return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;
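/*
 * Illustrative scenario (not from the original comment): CPU0 begins a
 * measurement with data->critical_sequence == max_sequence == 5.  CPU1
 * then records a new maximum and bumps max_sequence to 6, possibly
 * printing to the serial console while doing so.  When CPU0 finishes,
 * its sequence no longer matches, so check_critical_timing() below
 * discards that potentially perturbed measurement.
 */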

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /*
         * Slight chance to get a false positive on tracing_cpu,
         * although I'm starting to think there isn't a chance.
         * Leave this for now just to be paranoid.
         */
        if (!irqs_disabled_flags(*flags) && !preempt_count())
                return 0;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}
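/*
 * Typical call pattern (a sketch of what the tracer callbacks below do;
 * see irqsoff_tracer_call() and irqsoff_graph_entry()):
 *
 *      if (!func_prolog_dec(tr, &data, &flags))
 *              return;
 *      ... record the event ...
 *      atomic_dec(&data->disabled);
 */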

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
        int cpu;

        if (!(is_graph(tr) ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tr->max_latency = 0;
        tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions.  But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        struct trace_array *tr = irqsoff_trace;

        if (is_graph(tr))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}
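/*
 * Illustrative use of the threshold (assumes tracefs is mounted at
 * /sys/kernel/tracing): with tracing_thresh left at 0 only a new
 * maximum is recorded, e.g.
 *
 *      echo 0 > /sys/kernel/tracing/tracing_thresh
 *      cat /sys/kernel/tracing/tracing_max_latency
 *
 * whereas a non-zero threshold (in microseconds) records every
 * critical section that exceeds it:
 *
 *      echo 400 > /sys/kernel/tracing/tracing_thresh
 */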

static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        u64 T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(tr, delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(tr, delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tr->max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled || !tracing_is_enabled())
                return;

        data = per_cpu_ptr(tr->trace_buffer.data, cpu);

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to pause the measurement (e.g. while in idle) */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
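/*
 * Typical usage (a sketch of what the generic idle path, e.g.
 * default_idle_call() in kernel/sched/idle.c, does): pause the
 * measurement across the halt so that time spent idle with interrupts
 * disabled is not reported as an irqs-off latency.
 *
 *      stop_critical_timings();
 *      arch_cpu_idle();                // returns with irqs enabled
 *      start_critical_timings();
 */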

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * We are only interested in hardirq on/off events:
 */
static inline void tracer_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}

static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph();
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_irqsoff_function(tr, is_graph(tr), 1);
        else
                unregister_irqsoff_function(tr, is_graph(tr));
        return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (irqsoff_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return irqsoff_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_irqsoff_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
        if (irqsoff_busy)
                return -EBUSY;

        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        ftrace_init_array_ops(tr, irqsoff_tracer_call);

        /* Only toplevel instance supports graph tracing */
        if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
                                      is_graph(tr))))
                printk(KERN_ERR "failed to start irqsoff tracer\n");

        irqsoff_busy = true;
        return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph(tr));

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);

        irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flag_changed   = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
core_initcall(init_irqsoff_tracer);
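/*
 * Example tracefs session (illustrative; assumes tracefs is mounted at
 * /sys/kernel/tracing and the irqsoff tracer is built in):
 *
 *      cd /sys/kernel/tracing
 *      echo 0 > options/function-trace         # optional: reduce overhead
 *      echo irqsoff > current_tracer
 *      echo 1 > tracing_on
 *      ... run the workload ...
 *      echo 0 > tracing_on
 *      cat tracing_max_latency                 # worst-case latency, in usecs
 *      cat trace                               # the trace of that worst case
 */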
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */

#ifndef CONFIG_IRQSOFF_TRACER
static inline void tracer_hardirqs_on(void) { }
static inline void tracer_hardirqs_off(void) { }
static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
#endif

#ifndef CONFIG_PREEMPT_TRACER
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);
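/*
 * Note (not in the original source): with CONFIG_TRACE_IRQFLAGS, the
 * local_irq_enable()/local_irq_disable() family in include/linux/irqflags.h
 * calls trace_hardirqs_on()/trace_hardirqs_off(), so the functions below
 * see every hardirq state transition.  When CONFIG_PROVE_LOCKING is set,
 * lockdep provides these entry points instead and forwards to the
 * time_hardirqs_on()/time_hardirqs_off() hooks defined above.
 */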

void trace_hardirqs_on(void)
{
        if (!this_cpu_read(tracing_irq_cpu))
                return;

        trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
        tracer_hardirqs_on();

        this_cpu_write(tracing_irq_cpu, 0);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (this_cpu_read(tracing_irq_cpu))
                return;

        this_cpu_write(tracing_irq_cpu, 1);

        trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
        tracer_hardirqs_off();
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!this_cpu_read(tracing_irq_cpu))
                return;

        trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
        tracer_hardirqs_on_caller(caller_addr);

        this_cpu_write(tracing_irq_cpu, 0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (this_cpu_read(tracing_irq_cpu))
                return;

        this_cpu_write(tracing_irq_cpu, 1);

        trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
        tracer_hardirqs_off_caller(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

#if defined(CONFIG_PREEMPT_TRACER) || \
        (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
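/*
 * Note (not in the original source): these are reached from the preempt
 * counter code; roughly, preempt_latency_start()/preempt_latency_stop()
 * in kernel/sched/core.c call trace_preempt_off()/trace_preempt_on()
 * when preemption is first disabled and finally re-enabled.
 */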
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        trace_preempt_enable_rcuidle(a0, a1);
        tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        trace_preempt_disable_rcuidle(a0, a1);
        tracer_preempt_off(a0, a1);
}
#endif