linux/include/trace/events/sched.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

        TP_PROTO(struct task_struct *t),

        TP_ARGS(t),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
                __entry->pid    = t->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

        TP_PROTO(int ret),

        TP_ARGS(ret),

        TP_STRUCT__entry(
                __field(        int,    ret     )
        ),

        TP_fast_assign(
                __entry->ret    = ret;
        ),

        TP_printk("ret=%d", __entry->ret)
);
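
/*
 * Usage sketch (added commentary, not part of the original header):
 * TRACE_EVENT(name, ...) generates a trace_name() inline that the kernel
 * calls at the instrumentation site.  For the two events above,
 * kthread_stop() is assumed to emit them along these lines:
 *
 *      trace_sched_kthread_stop(k);            // about to stop kthread k
 *      ret = ...;                              // wait for the thread to exit
 *      trace_sched_kthread_stop_ret(ret);      // report its exit code
 */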

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

        TP_PROTO(struct task_struct *p, int success),

        TP_ARGS(p, success),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    success                 )
                __field(        int,    target_cpu              )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio;
                __entry->success        = success;
                __entry->target_cpu     = task_cpu(p);
        ),

        TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->success, __entry->target_cpu)
);

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct task_struct *p, int success),
             TP_ARGS(p, success));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
             TP_PROTO(struct task_struct *p, int success),
             TP_ARGS(p, success));
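
/*
 * Note (added commentary): DECLARE_EVENT_CLASS() describes the record
 * layout, assignment and print format once; each DEFINE_EVENT() then
 * instantiates a named tracepoint that reuses that template, so
 * sched_wakeup and sched_wakeup_new share the generated code.  A further
 * event with the same prototype could be added the same way, e.g.
 * (hypothetical name):
 *
 *      DEFINE_EVENT(sched_wakeup_template, sched_wakeup_example,
 *                   TP_PROTO(struct task_struct *p, int success),
 *                   TP_ARGS(p, success));
 */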

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
{
        long state = p->state;

#ifdef CONFIG_PREEMPT
        /*
         * For all intents and purposes a preempted task is a running task.
         */
        if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
                state = TASK_RUNNING;
#endif

        return state;
}
#endif
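
/*
 * Note (added commentary, assumption about intent): the helper above is
 * guarded by CREATE_TRACE_POINTS because it is only referenced from the
 * TP_fast_assign() of sched_switch below, and that code is only expanded
 * in the single translation unit that defines CREATE_TRACE_POINTS before
 * including this header; other includers never need the function.
 */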

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

        TP_PROTO(struct task_struct *prev,
                 struct task_struct *next),

        TP_ARGS(prev, next),

        TP_STRUCT__entry(
                __array(        char,   prev_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  prev_pid                        )
                __field(        int,    prev_prio                       )
                __field(        long,   prev_state                      )
                __array(        char,   next_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  next_pid                        )
                __field(        int,    next_prio                       )
        ),

        TP_fast_assign(
                memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
                __entry->prev_pid       = prev->pid;
                __entry->prev_prio      = prev->prio;
                __entry->prev_state     = __trace_sched_switch_state(prev);
                memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
                __entry->next_pid       = next->pid;
                __entry->next_prio      = next->prio;
        ),

        TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
                __entry->prev_state ?
                  __print_flags(__entry->prev_state, "|",
                                { 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
                                { 128, "W" }) : "R",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
);
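
/*
 * Example of the resulting trace line (values illustrative only), as
 * rendered by the TP_printk() format above:
 *
 *   prev_comm=bash prev_pid=2140 prev_prio=120 prev_state=S ==> next_comm=swapper next_pid=0 next_prio=120
 *
 * prev_state is decoded by __print_flags(): 0 prints as "R" (running),
 * otherwise the set state bits print as S, D, T, t, Z, X, x or W.
 */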

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

        TP_PROTO(struct task_struct *p, int dest_cpu),

        TP_ARGS(p, dest_cpu),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    orig_cpu                )
                __field(        int,    dest_cpu                )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio;
                __entry->orig_cpu       = task_cpu(p);
                __entry->dest_cpu       = dest_cpu;
        ),

        TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(p),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio;
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

        TP_PROTO(struct pid *pid),

        TP_ARGS(pid),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
                __entry->pid            = pid_nr(pid);
                __entry->prio           = current->prio;
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

        TP_PROTO(struct task_struct *parent, struct task_struct *child),

        TP_ARGS(parent, child),

        TP_STRUCT__entry(
                __array(        char,   parent_comm,    TASK_COMM_LEN   )
                __field(        pid_t,  parent_pid                      )
                __array(        char,   child_comm,     TASK_COMM_LEN   )
                __field(        pid_t,  child_pid                       )
        ),

        TP_fast_assign(
                memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
                __entry->parent_pid     = parent->pid;
                memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
                __entry->child_pid      = child->pid;
        ),

        TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
                __entry->parent_comm, __entry->parent_pid,
                __entry->child_comm, __entry->child_pid)
);

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

        TP_PROTO(struct task_struct *tsk, u64 delay),

        TP_ARGS(tsk, delay),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( u64,   delay                   )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid    = tsk->pid;
                __entry->delay  = delay;
        )
        TP_perf_assign(
                __perf_count(delay);
        ),

        TP_printk("comm=%s pid=%d delay=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->delay)
);
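
/*
 * Note (added commentary, my reading of the macros): TP_perf_assign() /
 * __perf_count(delay) make the recorded delay act as the event's count
 * when the tracepoint is consumed through perf, so the delays are summed
 * rather than each occurrence counting as 1.
 */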

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));
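
/*
 * Usage sketch (added commentary): with debugfs mounted at
 * /sys/kernel/debug, each event defined in this file shows up under the
 * tracing directory and can be enabled individually, e.g.:
 *
 *      echo 1 > /sys/kernel/debug/tracing/events/sched/sched_stat_wait/enable
 *      cat /sys/kernel/debug/tracing/trace
 */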

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

        TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

        TP_ARGS(tsk, runtime, vruntime),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( u64,   runtime                 )
                __field( u64,   vruntime                )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid            = tsk->pid;
                __entry->runtime        = runtime;
                __entry->vruntime       = vruntime;
        )
        TP_perf_assign(
                __perf_count(runtime);
        ),

        TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->runtime,
                        (unsigned long long)__entry->vruntime)
);

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

        TP_PROTO(struct task_struct *tsk, int newprio),

        TP_ARGS(tsk, newprio),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( int,   oldprio                 )
                __field( int,   newprio                 )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid            = tsk->pid;
                __entry->oldprio        = tsk->prio;
                __entry->newprio        = newprio;
        ),

        TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
                        __entry->comm, __entry->pid,
                        __entry->oldprio, __entry->newprio)
);
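
/*
 * Note (added commentary, call site assumed): the PI boost path, e.g.
 * rt_mutex_setprio(), is expected to call trace_sched_pi_setprio(tsk,
 * prio) before it actually changes tsk->prio, which is why oldprio can
 * simply be read back from tsk->prio in TP_fast_assign() above.
 */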

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>