linux/include/trace/events/sched.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

        TP_PROTO(struct task_struct *t),

        TP_ARGS(t),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
                __entry->pid    = t->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of kthread_stop():
 */
TRACE_EVENT(sched_kthread_stop_ret,

        TP_PROTO(int ret),

        TP_ARGS(ret),

        TP_STRUCT__entry(
                __field(        int,    ret     )
        ),

        TP_fast_assign(
                __entry->ret    = ret;
        ),

        TP_printk("ret=%d", __entry->ret)
);
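
/*
 * Usage sketch (not part of this header): the TRACE_EVENT() definitions
 * above expand into trace_sched_kthread_stop() and
 * trace_sched_kthread_stop_ret() hooks. The stop path in kernel/kthread.c
 * is expected to call them roughly like this (illustrative only):
 *
 *        int kthread_stop(struct task_struct *k)
 *        {
 *                int ret;
 *
 *                trace_sched_kthread_stop(k);
 *                // ... wake the thread and wait for it to exit ...
 *                ret = k->exit_code;
 *                trace_sched_kthread_stop_ret(ret);
 *                return ret;
 *        }
 */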

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

        TP_PROTO(struct task_struct *p, int success),

        TP_ARGS(__perf_task(p), success),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    success                 )
                __field(        int,    target_cpu              )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio;
                __entry->success        = success;
                __entry->target_cpu     = task_cpu(p);
        ),

        TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->success, __entry->target_cpu)
);

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct task_struct *p, int success),
             TP_ARGS(p, success));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
             TP_PROTO(struct task_struct *p, int success),
             TP_ARGS(p, success));
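
/*
 * Note: DECLARE_EVENT_CLASS() only defines the shared record layout and
 * format; each DEFINE_EVENT() above creates an actual tracepoint that uses
 * it. A rough wakeup-path sketch (the real callers live in
 * kernel/sched/core.c; names and arguments here are only illustrative):
 *
 *        static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p,
 *                                   int wake_flags)
 *        {
 *                trace_sched_wakeup(p, true);
 *                // ... mark the task TASK_RUNNING, run the wakeup hooks ...
 *        }
 *
 * wake_up_new_task() similarly fires trace_sched_wakeup_new() for a task
 * that is being scheduled for the first time.
 */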

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
{
        long state = p->state;

#ifdef CONFIG_PREEMPT
        /*
         * For all intents and purposes a preempted task is a running task.
         */
        if (task_preempt_count(p) & PREEMPT_ACTIVE)
                state = TASK_RUNNING | TASK_STATE_MAX;
#endif

        return state;
}
#endif

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

        TP_PROTO(struct task_struct *prev,
                 struct task_struct *next),

        TP_ARGS(prev, next),

        TP_STRUCT__entry(
                __array(        char,   prev_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  prev_pid                        )
                __field(        int,    prev_prio                       )
                __field(        long,   prev_state                      )
                __array(        char,   next_comm,      TASK_COMM_LEN   )
                __field(        pid_t,  next_pid                        )
                __field(        int,    next_prio                       )
        ),

        TP_fast_assign(
                memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
                __entry->prev_pid       = prev->pid;
                __entry->prev_prio      = prev->prio;
                __entry->prev_state     = __trace_sched_switch_state(prev);
                memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
                __entry->next_pid       = next->pid;
                __entry->next_prio      = next->prio;
        ),

        TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
                __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
                __entry->prev_state & (TASK_STATE_MAX-1) ?
                  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
                                { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
                                { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
                __entry->prev_state & TASK_STATE_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
);
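
/*
 * Example of how the TP_printk() format above renders in the trace buffer
 * (values are illustrative only):
 *
 *        sched_switch: prev_comm=bash prev_pid=2145 prev_prio=120 prev_state=S
 *                ==> next_comm=swapper/1 next_pid=0 next_prio=120
 *
 * prev_state prints "R" when no state bit is set, one of the flag letters
 * above otherwise, and a trailing "+" when the TASK_STATE_MAX bit marks a
 * preempted (still runnable) task, cf. __trace_sched_switch_state().
 */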

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

        TP_PROTO(struct task_struct *p, int dest_cpu),

        TP_ARGS(p, dest_cpu),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
                __field(        int,    orig_cpu                )
                __field(        int,    dest_cpu                )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio;
                __entry->orig_cpu       = task_cpu(p);
                __entry->dest_cpu       = dest_cpu;
        ),

        TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
                  __entry->comm, __entry->pid, __entry->prio,
                  __entry->orig_cpu, __entry->dest_cpu)
);
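
/*
 * Hedged call-site sketch: migrations are traced from set_task_cpu() in
 * kernel/sched/core.c before the task's CPU is rewritten, so task_cpu(p)
 * in TP_fast_assign() above still reads the original CPU. Roughly:
 *
 *        void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 *        {
 *                if (task_cpu(p) != new_cpu) {
 *                        // ... accounting ...
 *                        trace_sched_migrate_task(p, new_cpu);
 *                }
 *                __set_task_cpu(p, new_cpu);
 *        }
 */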

DECLARE_EVENT_CLASS(sched_process_template,

        TP_PROTO(struct task_struct *p),

        TP_ARGS(p),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
                __entry->pid            = p->pid;
                __entry->prio           = p->prio;
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

        TP_PROTO(struct pid *pid),

        TP_ARGS(pid),

        TP_STRUCT__entry(
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
                __field(        int,    prio                    )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
                __entry->pid            = pid_nr(pid);
                __entry->prio           = current->prio;
        ),

        TP_printk("comm=%s pid=%d prio=%d",
                  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

        TP_PROTO(struct task_struct *parent, struct task_struct *child),

        TP_ARGS(parent, child),

        TP_STRUCT__entry(
                __array(        char,   parent_comm,    TASK_COMM_LEN   )
                __field(        pid_t,  parent_pid                      )
                __array(        char,   child_comm,     TASK_COMM_LEN   )
                __field(        pid_t,  child_pid                       )
        ),

        TP_fast_assign(
                memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
                __entry->parent_pid     = parent->pid;
                memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
                __entry->child_pid      = child->pid;
        ),

        TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
                __entry->parent_comm, __entry->parent_pid,
                __entry->child_comm, __entry->child_pid)
);
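
/*
 * Hedged call-site sketch: the fork path fires this event once the child
 * exists but before it first runs; see kernel/fork.c for the real code.
 *
 *        long do_fork(unsigned long clone_flags, ...)
 *        {
 *                struct task_struct *p;
 *
 *                p = copy_process(clone_flags, ...);
 *                ...
 *                trace_sched_process_fork(current, p);
 *                ...
 *        }
 */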

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

        TP_PROTO(struct task_struct *p, pid_t old_pid,
                 struct linux_binprm *bprm),

        TP_ARGS(p, old_pid, bprm),

        TP_STRUCT__entry(
                __string(       filename,       bprm->filename  )
                __field(        pid_t,          pid             )
                __field(        pid_t,          old_pid         )
        ),

        TP_fast_assign(
                __assign_str(filename, bprm->filename);
                __entry->pid            = p->pid;
                __entry->old_pid        = old_pid;
        ),

        TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
                  __entry->pid, __entry->old_pid)
);
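
/*
 * Note on the dynamic string above: __string() reserves variable-length
 * space for bprm->filename in the ring-buffer record, __assign_str() copies
 * the string at trace time, and __get_str() retrieves it for TP_printk(),
 * so the full path is logged without a fixed-size array. Rendered output
 * looks roughly like (values illustrative):
 *
 *        sched_process_exec: filename=/usr/bin/ls pid=2207 old_pid=2207
 *
 * pid and old_pid typically differ only when a non-leader thread execs and
 * takes over the thread group leader's pid.
 */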

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

        TP_PROTO(struct task_struct *tsk, u64 delay),

        TP_ARGS(__perf_task(tsk), __perf_count(delay)),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( u64,   delay                   )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid    = tsk->pid;
                __entry->delay  = delay;
        ),

        TP_printk("comm=%s pid=%d delay=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->delay)
);


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay));
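
/*
 * Usage sketch: like every event in this file, the sched_stat_* tracepoints
 * can be enabled from user space through tracefs (commonly mounted under
 * /sys/kernel/debug/tracing); paths may differ by distribution:
 *
 *        # echo 1 > /sys/kernel/debug/tracing/events/sched/sched_stat_wait/enable
 *        # cat /sys/kernel/debug/tracing/trace_pipe
 */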

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

        TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

        TP_ARGS(tsk, __perf_count(runtime), vruntime),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( u64,   runtime                 )
                __field( u64,   vruntime                )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid            = tsk->pid;
                __entry->runtime        = runtime;
                __entry->vruntime       = vruntime;
        ),

        TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
                        __entry->comm, __entry->pid,
                        (unsigned long long)__entry->runtime,
                        (unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
             TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
             TP_ARGS(tsk, runtime, vruntime));
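
/*
 * Hedged call-site sketch: CFS accounts execution time in update_curr()
 * (kernel/sched/fair.c), which is where this event fires for tasks.
 * Roughly:
 *
 *        static void update_curr(struct cfs_rq *cfs_rq)
 *        {
 *                ...
 *                if (entity_is_task(curr)) {
 *                        struct task_struct *curtask = task_of(curr);
 *
 *                        trace_sched_stat_runtime(curtask, delta_exec,
 *                                                 curr->vruntime);
 *                        ...
 *                }
 *        }
 */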

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

        TP_PROTO(struct task_struct *tsk, int newprio),

        TP_ARGS(tsk, newprio),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
                __field( int,   oldprio                 )
                __field( int,   newprio                 )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid            = tsk->pid;
                __entry->oldprio        = tsk->prio;
                __entry->newprio        = newprio;
        ),

        TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
                        __entry->comm, __entry->pid,
                        __entry->oldprio, __entry->newprio)
);
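
/*
 * Hedged call-site sketch: rt-mutex priority inheritance boosts or restores
 * a task's priority in rt_mutex_setprio() (kernel/sched/core.c). The event
 * fires before tsk->prio is rewritten, which is why oldprio above can be
 * read from tsk->prio at trace time. Roughly:
 *
 *        void rt_mutex_setprio(struct task_struct *p, int prio)
 *        {
 *                ...
 *                trace_sched_pi_setprio(p, prio);
 *                ...
 *                p->prio = prio;
 *                ...
 *        }
 */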

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
        TP_PROTO(struct task_struct *tsk),
        TP_ARGS(tsk),

        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN   )
                __field( pid_t, pid                     )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
                __entry->pid = tsk->pid;
        ),

        TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
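
/*
 * Hedged call-site sketch: the hung-task watchdog (kernel/hung_task.c,
 * CONFIG_DETECT_HUNG_TASK) fires this when it finds a task stuck in
 * uninterruptible sleep for too long. Roughly:
 *
 *        static void check_hung_task(struct task_struct *t, unsigned long timeout)
 *        {
 *                ...
 *                trace_sched_process_hang(t);
 *                ...
 *        }
 */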

DECLARE_EVENT_CLASS(sched_move_task_template,

        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

        TP_ARGS(tsk, src_cpu, dst_cpu),

        TP_STRUCT__entry(
                __field( pid_t, pid                     )
                __field( pid_t, tgid                    )
                __field( pid_t, ngid                    )
                __field( int,   src_cpu                 )
                __field( int,   src_nid                 )
                __field( int,   dst_cpu                 )
                __field( int,   dst_nid                 )
        ),

        TP_fast_assign(
                __entry->pid            = task_pid_nr(tsk);
                __entry->tgid           = task_tgid_nr(tsk);
                __entry->ngid           = task_numa_group_id(tsk);
                __entry->src_cpu        = src_cpu;
                __entry->src_nid        = cpu_to_node(src_cpu);
                __entry->dst_cpu        = dst_cpu;
                __entry->dst_nid        = cpu_to_node(dst_cpu);
        ),

        TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
                        __entry->pid, __entry->tgid, __entry->ngid,
                        __entry->src_cpu, __entry->src_nid,
                        __entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

        TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
        TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

        TP_ARGS(tsk, src_cpu, dst_cpu)
);

TRACE_EVENT(sched_swap_numa,

        TP_PROTO(struct task_struct *src_tsk, int src_cpu,
                 struct task_struct *dst_tsk, int dst_cpu),

        TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

        TP_STRUCT__entry(
                __field( pid_t, src_pid                 )
                __field( pid_t, src_tgid                )
                __field( pid_t, src_ngid                )
                __field( int,   src_cpu                 )
                __field( int,   src_nid                 )
                __field( pid_t, dst_pid                 )
                __field( pid_t, dst_tgid                )
                __field( pid_t, dst_ngid                )
                __field( int,   dst_cpu                 )
                __field( int,   dst_nid                 )
        ),

        TP_fast_assign(
                __entry->src_pid        = task_pid_nr(src_tsk);
                __entry->src_tgid       = task_tgid_nr(src_tsk);
                __entry->src_ngid       = task_numa_group_id(src_tsk);
                __entry->src_cpu        = src_cpu;
                __entry->src_nid        = cpu_to_node(src_cpu);
                __entry->dst_pid        = task_pid_nr(dst_tsk);
                __entry->dst_tgid       = task_tgid_nr(dst_tsk);
                __entry->dst_ngid       = task_numa_group_id(dst_tsk);
                __entry->dst_cpu        = dst_cpu;
                __entry->dst_nid        = cpu_to_node(dst_cpu);
        ),

        TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
                        __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
                        __entry->src_cpu, __entry->src_nid,
                        __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
                        __entry->dst_cpu, __entry->dst_nid)
);
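
/*
 * Usage sketch for the NUMA-balancing events above: they are emitted from
 * the automatic NUMA balancing paths in kernel/sched/ and are convenient to
 * capture system-wide with perf (illustrative command lines only):
 *
 *        # perf record -e sched:sched_move_numa -e sched:sched_swap_numa -a sleep 10
 *        # perf script
 */
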
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>