linux/kernel/trace/trace_sched_wakeup.c
// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array       *wakeup_trace;
static int __read_mostly        tracer_enabled;

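/*
 * State for the task currently being traced: wakeup_task is the task
 * whose wakeup latency is being measured, wakeup_cpu is the CPU it was
 * woken on, and wakeup_current_cpu follows the task if it migrates
 * (see probe_wakeup_migrate_task()).
 */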
static struct task_struct       *wakeup_task;
static int                      wakeup_cpu;
static int                      wakeup_current_cpu;
static unsigned                 wakeup_prio = -1;
static int                      wakeup_rt;
static int                      wakeup_dl;
static int                      tracing_dl = 0;

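/*
 * Protects the wakeup_* tracing state above; always taken with
 * interrupts disabled.
 */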
static arch_spinlock_t wakeup_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

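/* Trace flags saved at init time, restored by wakeup_tracer_reset() */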
static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

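/* Set while the function (or function graph) tracer is registered */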
static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 *            is disabled and data->disabled is incremented.
 *         0 if the trace is to be ignored, and preemption
 *            is not disabled and data->disabled is
 *            kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
                            struct trace_array_cpu **data,
                            int *pc)
{
        long disabled;
        int cpu;

        if (likely(!wakeup_task))
                return 0;

        *pc = preempt_count();
        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        if (cpu != wakeup_current_cpu)
                goto out_enable;

        *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&(*data)->disabled);
        if (unlikely(disabled != 1))
                goto out;

        return 1;

out:
        atomic_dec(&(*data)->disabled);

out_enable:
        preempt_enable_notrace();
        return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static int wakeup_display_graph(struct trace_array *tr, int set)
{
        if (!(is_graph(tr) ^ set))
                return 0;

        stop_func_tracer(tr, !set);

        wakeup_reset(wakeup_trace);
        tr->max_latency = 0;

        return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc, ret = 0;

        if (ftrace_graph_ignore_func(trace))
                return 0;
        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions.  But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return 0;

        local_save_flags(flags);
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
        preempt_enable_notrace();

        return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        ftrace_graph_addr_finish(trace);

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;

        local_save_flags(flags);
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        preempt_enable_notrace();
        return;
}

static struct fgraph_ops fgraph_wakeup_ops = {
        .entryfunc = &wakeup_graph_entry,
        .retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
        if (is_graph(iter->tr))
                graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_CPU |  \
                            TRACE_GRAPH_PRINT_REL_TIME | \
                            TRACE_GRAPH_PRINT_DURATION | \
                            TRACE_GRAPH_PRINT_OVERHEAD | \
                            TRACE_GRAPH_PRINT_IRQS)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph(iter->tr))
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
        if (is_graph(wakeup_trace))
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}
#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
                   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = wakeup_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;

        local_irq_save(flags);
        trace_function(tr, ip, parent_ip, flags, pc);
        local_irq_restore(flags);

        atomic_dec(&data->disabled);
        preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
        int ret;

        /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
        if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
                return 0;

        if (graph)
                ret = register_ftrace_graph(&fgraph_wakeup_ops);
        else
                ret = register_ftrace_function(tr->ops);

        if (!ret)
                function_enabled = true;

        return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
        if (!function_enabled)
                return;

        if (graph)
                unregister_ftrace_graph(&fgraph_wakeup_ops);
        else
                unregister_ftrace_function(tr->ops);

        function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
        if (!(mask & TRACE_ITER_FUNCTION))
                return 0;

        if (set)
                register_wakeup_function(tr, is_graph(tr), 1);
        else
                unregister_wakeup_function(tr, is_graph(tr));
        return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
        return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
        return 0;
}
#endif /* else CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
        trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph(tr))
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
        struct tracer *tracer = tr->current_trace;

        if (wakeup_function_set(tr, mask, set))
                return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (mask & TRACE_ITER_DISPLAY_GRAPH)
                return wakeup_display_graph(tr, set);
#endif

        return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
        int ret;

        ret = register_wakeup_function(tr, graph, 0);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 *
 * If tracing_thresh is set, record every latency at or above the
 * threshold; otherwise record only a new maximum latency.
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return false;
        } else {
                if (delta <= tr->max_latency)
                        return false;
        }
        return true;
}

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
        if (task != wakeup_task)
                return;

        wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
        struct trace_event_call *call = &event_context_switch;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
        entry->prev_state               = task_state_index(prev);
        entry->next_pid                 = next->pid;
        entry->next_prio                = next->prio;
        entry->next_state               = task_state_index(next);
        entry->next_cpu = task_cpu(next);

        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
{
        struct trace_event_call *call = &event_wakeup;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;

        event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
        entry->prev_state               = task_state_index(curr);
        entry->next_pid                 = wakee->pid;
        entry->next_prio                = wakee->prio;
        entry->next_state               = task_state_index(wakee);
        entry->next_cpu                 = task_cpu(wakee);

        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
                          struct task_struct *prev, struct task_struct *next)
{
        struct trace_array_cpu *data;
        u64 T0, T1, delta;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        tracing_record_cmdline(prev);

        if (unlikely(!tracer_enabled))
                return;

        /*
         * When we start a new trace, we set wakeup_task to NULL
         * and then set tracer_enabled = 1. We want to make sure
         * that another CPU does not see tracer_enabled = 1 while
         * wakeup_task still points to an older task, which might
         * actually be the same as next.
         */
        smp_rmb();

        if (next != wakeup_task)
                return;

        pc = preempt_count();

        /* disable local data, not wakeup_cpu data */
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
        if (likely(disabled != 1))
                goto out;

        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);

        /* We could race with grabbing wakeup_lock */
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;

        /* The task we are waiting for is waking up */
        data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

        __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
        tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
        __trace_stack(wakeup_trace, flags, 0, pc);

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        if (!report_latency(wakeup_trace, delta))
                goto out_unlock;

        if (likely(!is_tracing_stopped())) {
                wakeup_trace->max_latency = delta;
                update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
        }

out_unlock:
        __wakeup_reset(wakeup_trace);
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
out:
        atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
        wakeup_cpu = -1;
        wakeup_prio = -1;
        tracing_dl = 0;

        if (wakeup_task)
                put_task_struct(wakeup_task);

        wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
        unsigned long flags;

        tracing_reset_online_cpus(&tr->trace_buffer);

        local_irq_save(flags);
        arch_spin_lock(&wakeup_lock);
        __wakeup_reset(tr);
        arch_spin_unlock(&wakeup_lock);
        local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
        struct trace_array_cpu *data;
        int cpu = smp_processor_id();
        unsigned long flags;
        long disabled;
        int pc;

        if (likely(!tracer_enabled))
                return;

        tracing_record_cmdline(p);
        tracing_record_cmdline(current);

        /*
         * The semantics are as follows:
         *  - the wakeup tracer handles all tasks in the system, independently
         *    of their scheduling class;
         *  - the wakeup_rt tracer handles tasks belonging to the sched_dl and
         *    sched_rt classes;
         *  - wakeup_dl handles tasks belonging to the sched_dl class only.
         */
        if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
            (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
            (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
                return;

        pc = preempt_count();
        disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
        if (unlikely(disabled != 1))
                goto out;

        /* interrupts should be off from try_to_wake_up */
        arch_spin_lock(&wakeup_lock);

        /* check for races. */
        if (!tracer_enabled || tracing_dl ||
            (!dl_task(p) && p->prio >= wakeup_prio))
                goto out_locked;

        /* reset the trace */
        __wakeup_reset(wakeup_trace);

        wakeup_cpu = task_cpu(p);
        wakeup_current_cpu = wakeup_cpu;
        wakeup_prio = p->prio;

        /*
         * Once you start tracing a -deadline task, don't bother tracing
         * another task until the first one wakes up.
         */
        if (dl_task(p))
                tracing_dl = 1;
        else
                tracing_dl = 0;

        wakeup_task = get_task_struct(p);

        local_save_flags(flags);

        data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
        __trace_stack(wakeup_trace, flags, 0, pc);

        /*
         * We must be careful in using CALLER_ADDR2. But since wake_up
         * is not called by an assembly function (whereas schedule is),
         * it should be safe to use it here.
         */
        __trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
        arch_spin_unlock(&wakeup_lock);
out:
        atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return;
        }

        ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_switch\n");
                goto fail_deprobe_wake_new;
        }

        ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_migrate_task\n");
                return;
        }

        wakeup_reset(tr);

        /*
         * Don't let the tracer_enabled = 1 show up before
         * the wakeup_task is reset. This may be overkill since
         * wakeup_reset does a spin_unlock after setting the
         * wakeup_task to NULL, but I want to be safe.
         * This is a slow path anyway.
         */
        smp_wmb();

        if (start_func_tracer(tr, is_graph(tr)))
                printk(KERN_ERR "failed to start wakeup tracer\n");

        return;
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        stop_func_tracer(tr, is_graph(tr));
        unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
        unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
        unregister_trace_sched_wakeup(probe_wakeup, NULL);
        unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
        save_flags = tr->trace_flags;

        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

        tr->max_latency = 0;
        wakeup_trace = tr;
        ftrace_init_array_ops(tr, wakeup_tracer_call);
        start_wakeup_tracer(tr);

        wakeup_busy = true;
        return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 0;
        wakeup_rt = 0;
        return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 0;
        wakeup_rt = 1;
        return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
        if (wakeup_busy)
                return -EBUSY;

        wakeup_dl = 1;
        wakeup_rt = 0;
        return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

        stop_wakeup_tracer(tr);
        /* make sure we put back any tasks we are tracing */
        wakeup_reset(tr);

        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
        ftrace_reset_array_ops(tr);
        wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
        wakeup_reset(tr);
        tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
        .name           = "wakeup",
        .init           = wakeup_tracer_init,
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
        .flag_changed   = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
#endif
        .open           = wakeup_trace_open,
        .close          = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
        .name           = "wakeup_rt",
        .init           = wakeup_rt_tracer_init,
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
        .flag_changed   = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
#endif
        .open           = wakeup_trace_open,
        .close          = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
        .name           = "wakeup_dl",
        .init           = wakeup_dl_tracer_init,
        .reset          = wakeup_tracer_reset,
        .start          = wakeup_tracer_start,
        .stop           = wakeup_tracer_stop,
        .print_max      = true,
        .print_header   = wakeup_print_header,
        .print_line     = wakeup_print_line,
        .flag_changed   = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
#endif
        .open           = wakeup_trace_open,
        .close          = wakeup_trace_close,
        .allow_instances = true,
        .use_max_tr     = true,
};

__init static int init_wakeup_tracer(void)
{
        int ret;

        ret = register_tracer(&wakeup_tracer);
        if (ret)
                return ret;

        ret = register_tracer(&wakeup_rt_tracer);
        if (ret)
                return ret;

        ret = register_tracer(&wakeup_dl_tracer);
        if (ret)
                return ret;

        return 0;
}
core_initcall(init_wakeup_tracer);
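/*
 * Example usage from user space (a sketch, assuming tracefs is mounted
 * at the usual /sys/kernel/tracing):
 *
 *   echo wakeup_rt > /sys/kernel/tracing/current_tracer
 *   echo 1 > /sys/kernel/tracing/tracing_on
 *   # ... run the workload of interest ...
 *   cat /sys/kernel/tracing/tracing_max_latency
 *   cat /sys/kernel/tracing/trace
 */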