linux/kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH     1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* display latency trace as call graph */
        { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        .val  = 0,
        .opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(*flags))
                return 0;

        *data = tr->data[cpu];
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
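/*
 * Handler for the display-graph tracer option (typically toggled via
 * the options/display-graph file in the tracing debugfs directory).
 * Switching modes requires a restart: stop the tracer in its old
 * mode, clear the per-cpu timing state and the max latency, then
 * start it again in the requested mode.
 */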
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
        int cpu;

        if (!(bit & TRACE_DISPLAY_GRAPH))
                return -EINVAL;

        if (!(is_graph() ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tracing_max_latency = 0;
        tracing_reset_online_cpus(irqsoff_trace);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

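/*
 * Graph-entry callback: record a function entry, but only while a
 * critical section is being timed on this CPU (func_prolog_dec).
 */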
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

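/* Graph-return callback: mirrors irqsoff_graph_entry() above. */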
static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph())
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph())
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        if (is_graph())
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph())
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
        return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
        trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

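/*
 * A critical section ended: compute the delta from preempt_timestamp
 * and, if it is a new maximum (and max_sequence shows no other CPU
 * recorded one in the meantime), save the snapshot with
 * update_max_tr_single(). Called with the section's data->disabled
 * already incremented.
 */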
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(delta))
                goto out;

        raw_spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tracing_max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

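/*
 * Mark the start of a critical section: stamp preempt_timestamp,
 * record where the section began and flag this CPU as timing
 * (tracing_cpu), so the function callbacks above know to trace.
 */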
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = tr->data[cpu];

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

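/*
 * Mark the end of a critical section: clear tracing_cpu, trace the
 * final function and let check_critical_timing() decide whether the
 * measured latency is a new maximum.
 */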
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        data = tr->data[cpu];

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings; used to stop tracing while in idle */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
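/*
 * With lockdep (PROVE_LOCKING) enabled, lockdep owns the
 * trace_hardirqs_*() hooks and calls back into the tracer through
 * time_hardirqs_on/off(); without it, this file supplies the
 * trace_hardirqs_*() functions itself below.
 */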
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
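/*
 * Called when preemption is enabled or disabled. The !irq_trace()
 * check ignores preempt transitions that happen while irqs are off
 * and being timed, so preemptirqsoff measures the outer section.
 */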
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace() && !irq_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

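/*
 * Register the ftrace callback for the current mode: the plain
 * function tracer, or the graph entry/return pair when the
 * display-graph option is set.
 */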
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret = 0;

        if (!graph)
                ret = register_ftrace_function(&trace_ops);
        else
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        if (!graph)
                unregister_ftrace_function(&trace_ops);
        else
                unregister_ftrace_graph();
}

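/*
 * Common init for all three tracers: force the overwrite and
 * latency-format flags (saving the old values for reset), clear the
 * max latency and the ring buffer, then start tracing.
 */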
static void __irqsoff_tracer_init(struct trace_array *tr)
{
        save_flags = trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);

        tracing_max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(tr);

        if (start_irqsoff_tracer(tr, is_graph()))
                printk(KERN_ERR "failed to start irqsoff tracer\n");
}

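/* Stop the tracer and restore the flags saved at init time. */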
static void irqsoff_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_irqsoff_tracer(tr, is_graph());

        set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
        .flag_changed   = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
        .flag_changed   = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = true,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
        .flag_changed   = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

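/*
 * Register whichever of the three tracers (irqsoff, preemptoff,
 * preemptirqsoff) the kernel configuration enabled.
 */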
__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
core_initcall(init_irqsoff_tracer);