linux/kernel/trace/trace_sched_switch.c
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array       *ctx_trace;
static int __read_mostly        tracer_enabled;
static int                      sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int                      sched_stopped;

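/*
 * tracing_sched_switch_trace - record one context switch in the ring buffer
 *
 * Reserves a TRACE_CTX entry in @tr's ring buffer, fills it with the
 * pid/prio/state of @prev and @next plus the CPU that @next runs on,
 * and commits it unless the event filter discards it.
 */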
void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
        struct ftrace_event_call *call = &event_context_switch;
        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
        entry->prev_state               = prev->state;
        entry->next_pid                 = next->pid;
        entry->next_prio                = next->prio;
        entry->next_state               = next->state;
        entry->next_cpu = task_cpu(next);

        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, flags, pc);
}

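/*
 * probe_sched_switch - tracepoint probe for sched_switch
 *
 * Always records the comms of @prev and @next so pid-to-cmdline
 * resolution keeps working, then, if the tracer is enabled and not
 * stopped, writes the context switch entry with interrupts disabled.
 */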
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                        struct task_struct *next)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu;
        int pc;

        if (unlikely(!sched_ref))
                return;

        tracing_record_cmdline(prev);
        tracing_record_cmdline(next);

        if (!tracer_enabled || sched_stopped)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

        local_irq_restore(flags);
}

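/*
 * tracing_sched_wakeup_trace - record a task wakeup in the ring buffer
 *
 * Reserves a TRACE_WAKE entry describing @curr waking @wakee, commits
 * it unless the event filter discards it, then records kernel and user
 * stack traces for the event.
 */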
void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
{
        struct ftrace_event_call *call = &event_wakeup;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        struct ring_buffer *buffer = tr->buffer;

        event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
        entry->prev_state               = curr->state;
        entry->next_pid                 = wakee->pid;
        entry->next_prio                = wakee->prio;
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);

        if (!filter_check_discard(call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(tr->buffer, flags, 6, pc);
        ftrace_trace_userstack(tr->buffer, flags, pc);
}

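/*
 * probe_sched_wakeup - tracepoint probe for sched_wakeup and
 * sched_wakeup_new
 *
 * Records the current task's comm, then, if the tracer is enabled and
 * not stopped, writes a wakeup entry for @wakee with interrupts
 * disabled.
 */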
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu, pc;

        if (unlikely(!sched_ref))
                return;

        tracing_record_cmdline(current);

        if (!tracer_enabled || sched_stopped)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_wakeup_trace(ctx_trace, wakee, current,
                                           flags, pc);

        local_irq_restore(flags);
}

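/*
 * tracing_sched_register - attach the probes to the scheduler tracepoints
 *
 * Registers probe_sched_wakeup on sched_wakeup and sched_wakeup_new,
 * and probe_sched_switch on sched_switch.  On failure, any probes
 * already registered are unwound before the error is returned.
 */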
static int tracing_sched_register(void)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_sched_switch);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_switch\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_sched_wakeup);
        return ret;
}

static void tracing_sched_unregister(void)
{
        unregister_trace_sched_switch(probe_sched_switch);
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
        unregister_trace_sched_wakeup(probe_sched_wakeup);
}

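/*
 * sched_ref counts the users of the scheduler tracepoints (cmdline
 * recording and context switch recording).  The probes are registered
 * by the first user and unregistered by the last, under
 * sched_register_mutex.
 */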
static void tracing_start_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(sched_ref++))
                tracing_sched_register();
        mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(--sched_ref))
                tracing_sched_unregister();
        mutex_unlock(&sched_register_mutex);
}

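/*
 * tracing_start_cmdline_record()/tracing_stop_cmdline_record() let other
 * tracers keep the pid-to-comm table up to date without enabling the
 * context switch events themselves: they only take and drop a reference
 * on the scheduler tracepoints.
 */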
void tracing_start_cmdline_record(void)
{
        tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
        tracing_stop_sched_switch();
}

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
        if (unlikely(!ctx_trace)) {
                WARN_ON(1);
                return;
        }

        tracing_start_sched_switch();

        mutex_lock(&sched_register_mutex);
        tracer_enabled++;
        mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
        mutex_lock(&sched_register_mutex);
        tracer_enabled--;
        WARN_ON(tracer_enabled < 0);
        mutex_unlock(&sched_register_mutex);

        tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
        ctx_trace = tr;
}
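
/*
 * Illustrative sketch (not part of this file): a tracer that wants the
 * context switch events in its own buffer would typically do, from its
 * init callback, something like the hypothetical my_tracer_init() below,
 * mirroring sched_switch_trace_init() further down:
 *
 *      static int my_tracer_init(struct trace_array *tr)
 *      {
 *              tracing_sched_switch_assign_trace(tr);
 *              tracing_start_sched_switch_record();
 *              return 0;
 *      }
 *
 * and call tracing_stop_sched_switch_record() from its reset callback.
 */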

static void stop_sched_trace(struct trace_array *tr)
{
        tracing_stop_sched_switch_record();
}

static int sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;
        tracing_reset_online_cpus(tr);
        tracing_start_sched_switch_record();
        return 0;
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (sched_ref)
                stop_sched_trace(tr);
}

static void sched_switch_trace_start(struct trace_array *tr)
{
        sched_stopped = 0;
}

static void sched_switch_trace_stop(struct trace_array *tr)
{
        sched_stopped = 1;
}

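/*
 * The "sched_switch" tracer itself: init starts recording, reset stops
 * it if the probes are still referenced, and start/stop toggle
 * sched_stopped so recording can be paused without unregistering the
 * tracepoint probes.
 */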
static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .start          = sched_switch_trace_start,
        .stop           = sched_switch_trace_stop,
        .wait_pipe      = poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
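
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug): the
 * tracer registered above is selected and read back from user space
 * with something like:
 *
 *      echo sched_switch > /sys/kernel/debug/tracing/current_tracer
 *      cat /sys/kernel/debug/tracing/trace
 *
 * Writing "nop" to current_tracer disables it again.
 */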