/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
 do {                                           \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                printk(x);                      \
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);

        return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;

        return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
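
/*
 * A worked example: SPLIT_NS(1234567890ULL) evaluates to the argument
 * pair 1234, 567890, which a "%Ld.%06ld" format prints as "1234.567890"
 * (nanoseconds split at the millisecond boundary). Note that do_div()
 * divides its first argument in place and returns the remainder.
 */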

#define SCHED_FEAT(name, enabled)       \
        #name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

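/*
 * features.h is pulled into this file more than once with different
 * SCHED_FEAT() definitions (an X-macro). With the definition above, an
 * entry such as SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) expands to the
 * string literal "GENTLE_FAIR_SLEEPERS", building the name table.
 */
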
static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)       \
        jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
        static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
        static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* HAVE_JUMP_LABEL */

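/*
 * Returns the index of the matched feature, or __SCHED_FEAT_NR when no
 * feature name matched; sched_feat_write() below turns the latter into
 * -EINVAL.
 */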
static int sched_feat_set(char *cmp)
{
        int i;
        int neg = 0;

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (strcmp(cmp, sched_feat_names[i]) == 0) {
                        if (neg) {
                                sysctl_sched_features &= ~(1UL << i);
                                sched_feat_disable(i);
                        } else {
                                sysctl_sched_features |= (1UL << i);
                                sched_feat_enable(i);
                        }
                        break;
                }
        }

        return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int i;
        struct inode *inode;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
        inode_lock(inode);
        i = sched_feat_set(cmp);
        inode_unlock(inode);
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;

        *ppos += cnt;

        return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        return 0;
}
late_initcall(sched_init_debug);
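
/*
 * With debugfs mounted at its conventional /sys/kernel/debug location,
 * the feature bits can be inspected and toggled from user space, e.g.:
 *
 *	cat /sys/kernel/debug/sched_features
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 */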

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
        {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
        {}
};

static struct ctl_table sd_ctl_root[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
        {}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
        struct ctl_table *entry =
                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

        return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
        struct ctl_table *entry;

        /*
         * In the intermediate directories, both the child directory and
         * procname are dynamically allocated and could fail but the mode
         * will always be set. In the lowest directory the names are
         * static strings and all have proc handlers.
         */
        for (entry = *tablep; entry->mode; entry++) {
                if (entry->child)
                        sd_free_ctl_entry(&entry->child);
                if (entry->proc_handler == NULL)
                        kfree(entry->procname);
        }

        kfree(*tablep);
        *tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
                const char *procname, void *data, int maxlen,
                umode_t mode, proc_handler *proc_handler,
                bool load_idx)
{
        entry->procname = procname;
        entry->data = data;
        entry->maxlen = maxlen;
        entry->mode = mode;
        entry->proc_handler = proc_handler;

        if (load_idx) {
                entry->extra1 = &min_load_idx;
                entry->extra2 = &max_load_idx;
        }
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
        struct ctl_table *table = sd_alloc_ctl_entry(14);

        if (table == NULL)
                return NULL;

        set_table_entry(&table[0], "min_interval", &sd->min_interval,
                sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[1], "max_interval", &sd->max_interval,
                sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
                sizeof(int), 0644, proc_dointvec_minmax, true);
        set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
                sizeof(int), 0644, proc_dointvec_minmax, true);
        set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
                sizeof(int), 0644, proc_dointvec_minmax, true);
        set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
                sizeof(int), 0644, proc_dointvec_minmax, true);
        set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
                sizeof(int), 0644, proc_dointvec_minmax, true);
        set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
                sizeof(int), 0644, proc_dointvec_minmax, false);
        set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
                sizeof(int), 0644, proc_dointvec_minmax, false);
        set_table_entry(&table[9], "cache_nice_tries",
                &sd->cache_nice_tries,
                sizeof(int), 0644, proc_dointvec_minmax, false);
        set_table_entry(&table[10], "flags", &sd->flags,
                sizeof(int), 0644, proc_dointvec_minmax, false);
        set_table_entry(&table[11], "max_newidle_lb_cost",
                &sd->max_newidle_lb_cost,
                sizeof(long), 0644, proc_doulongvec_minmax, false);
        set_table_entry(&table[12], "name", sd->name,
                CORENAME_MAX_SIZE, 0444, proc_dostring, false);
        /* &table[13] is terminator */

        return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
        struct ctl_table *entry, *table;
        struct sched_domain *sd;
        int domain_num = 0, i;
        char buf[32];

        for_each_domain(cpu, sd)
                domain_num++;
        entry = table = sd_alloc_ctl_entry(domain_num + 1);
        if (table == NULL)
                return NULL;

        i = 0;
        for_each_domain(cpu, sd) {
                snprintf(buf, 32, "domain%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_domain_table(sd);
                entry++;
                i++;
        }
        return table;
}

static struct ctl_table_header *sd_sysctl_header;
void register_sched_domain_sysctl(void)
{
        int i, cpu_num = num_possible_cpus();
        struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
        char buf[32];

        WARN_ON(sd_ctl_dir[0].child);
        sd_ctl_dir[0].child = entry;

        if (entry == NULL)
                return;

        for_each_possible_cpu(i) {
                snprintf(buf, 32, "cpu%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_cpu_table(i);
                entry++;
        }

        WARN_ON(sd_sysctl_header);
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
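
/*
 * The table built above is exposed under /proc/sys/kernel/sched_domain/,
 * one directory per CPU and per sched_domain level, e.g.:
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 */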

/*
 * May be called multiple times between calls to
 * register_sched_domain_sysctl().
 */
void unregister_sched_domain_sysctl(void)
{
        unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
        if (sd_ctl_dir[0].child)
                sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];

#define P(F) \
        SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
        SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

        if (!se)
                return;

        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
        if (schedstat_enabled()) {
                PN(se->statistics.wait_start);
                PN(se->statistics.sleep_start);
                PN(se->statistics.block_start);
                PN(se->statistics.sleep_max);
                PN(se->statistics.block_max);
                PN(se->statistics.exec_max);
                PN(se->statistics.slice_max);
                PN(se->statistics.wait_max);
                PN(se->statistics.wait_sum);
                P(se->statistics.wait_count);
        }
#endif
        P(se->load.weight);
#ifdef CONFIG_SMP
        P(se->avg.load_avg);
        P(se->avg.util_avg);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;

        return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
}
#endif
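
/*
 * Note: task_group_path() formats into the single static group_path[]
 * buffer above; callers are expected to be serialized, which in this
 * file is done by sched_debug_lock, taken in print_cpu() below.
 */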

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (rq->curr == p)
                SEQ_printf(m, "R");
        else
                SEQ_printf(m, " ");

        SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
#ifdef CONFIG_SCHEDSTATS
        if (schedstat_enabled()) {
                SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                        SPLIT_NS(p->se.statistics.wait_sum),
                        SPLIT_NS(p->se.sum_exec_runtime),
                        SPLIT_NS(p->se.statistics.sum_sleep_runtime));
        }
#else
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                0LL, 0L,
                SPLIT_NS(p->se.sum_exec_runtime),
                0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

        SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;

        SEQ_printf(m,
        "\nrunnable tasks:\n"
        "            task   PID         tree-key  switches  prio"
        "     wait-time             sum-exec        sum-sleep\n"
        "------------------------------------------------------"
        "----------------------------------------------------\n");

        rcu_read_lock();
        for_each_process_thread(g, p) {
                if (task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        }
        rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
        SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (cfs_rq->rb_leftmost)
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
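        /*
         * spread0: this runqueue's min_vruntime relative to cpu0's,
         * a rough indicator of cross-CPU vruntime drift.
         */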
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
                        cfs_rq->avg.load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
                        cfs_rq->runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
                        cfs_rq->avg.util_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
                        atomic_long_read(&cfs_rq->removed_load_avg));
        SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
                        atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
                        cfs_rq->tg_load_avg_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
                        cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
        SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
        SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

        P(rt_nr_running);
        P(rt_throttled);
        PN(rt_time);
        PN(rt_runtime);

#undef PN
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
        struct dl_bw *dl_bw;

        SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
        SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
#ifdef CONFIG_SMP
        dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
        dl_bw = &dl_rq->dl_bw;
#endif
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)                                                            \
do {                                                                    \
        if (sizeof(rq->x) == 4)                                         \
                SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));    \
        else                                                            \
                SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
        SEQ_printf(m, "  .%-30s: %lu\n", "load",
                   rq->load.weight);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
        PN(next_balance);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
        PN(clock);
        PN(clock_task);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
        P(cpu_load[3]);
        P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

#ifdef CONFIG_SMP
        P64(avg_idle);
        P64(max_idle_balance_cost);
#endif

        if (schedstat_enabled()) {
                P(yld_count);
                P(sched_count);
                P(sched_goidle);
                P(ttwu_count);
                P(ttwu_local);
        }

#undef P
#undef P64
#endif
        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
        print_dl_stats(m, cpu);

        print_rq(m, rq, cpu);
        spin_unlock_irqrestore(&sched_debug_lock, flags);
        SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};

static void sched_debug_header(struct seq_file *m)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
        P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable());
#endif
#undef PN
#undef P

        SEQ_printf(m, "\n");
        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n",
                "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
        SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
        int cpu = (unsigned long)(v - 2);

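        /*
         * The seq iterator encodes the header position as (void *)1 and
         * CPU n as (void *)(n + 2) (see sched_debug_start() below), so
         * the header decodes to cpu == -1 here.
         */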
        if (cpu != -1)
                print_cpu(m, cpu);
        else
                sched_debug_header(m);

        return 0;
}

void sysrq_sched_debug_show(void)
{
        int cpu;

        sched_debug_header(NULL);
        for_each_online_cpu(cpu)
                print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing, so we
 * have to use cpumask_*() to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);
        return NULL;
}
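
/*
 * An example of the mapping above: with only CPUs 1 and 3 online,
 * *offset == 2 gives n == 1, which cpumask_next(0, ...) resolves to
 * CPU 1; *offset == 3 gives n == 2, which resolves to CPU 3.
 */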

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
        .start = sched_debug_start,
        .next = sched_debug_next,
        .stop = sched_debug_stop,
        .show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
        seq_release(inode, file);

        return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
        int ret = 0;

        ret = seq_open(filp, &sched_debug_sops);

        return ret;
}

static const struct file_operations sched_debug_fops = {
        .open           = sched_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
        struct proc_dir_entry *pe;

        pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
        if (!pe)
                return -ENOMEM;
        return 0;
}

__initcall(init_sched_debug_procfs);
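
/*
 * The per-CPU dump registered above is then readable from user space:
 *
 *	cat /proc/sched_debug
 */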

#define __P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
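
/*
 * File-scope copies of the printing helpers, used by sched_show_numa()
 * below. proc_sched_show_task() re-defines identical copies for its own
 * body and #undefs them at its end; the textually earlier uses here are
 * unaffected.
 */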


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
                unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
        SEQ_printf(m, "numa_faults node=%d ", node);
        SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
        SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;

        if (p->mm)
                P(mm->numa_scan_seq);

        task_lock(p);
        pol = p->mempolicy;
        if (pol && !(pol->flags & MPOL_F_MORON))
                pol = NULL;
        mpol_get(pol);
        task_unlock(p);

        P(numa_pages_migrated);
        P(numa_preferred_nid);
        P(total_numa_faults);
        SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
                        task_node(p), task_numa_group_id(p));
        show_numa_stats(p, m);
        mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------"
                "----------\n");
#define __P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
        SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
        SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);

        nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
        P(se.nr_migrations);

        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;

                PN(se.statistics.sum_sleep_runtime);
                PN(se.statistics.wait_start);
                PN(se.statistics.sleep_start);
                PN(se.statistics.block_start);
                PN(se.statistics.sleep_max);
                PN(se.statistics.block_max);
                PN(se.statistics.exec_max);
                PN(se.statistics.slice_max);
                PN(se.statistics.wait_max);
                PN(se.statistics.wait_sum);
                P(se.statistics.wait_count);
                PN(se.statistics.iowait_sum);
                P(se.statistics.iowait_count);
                P(se.statistics.nr_migrations_cold);
                P(se.statistics.nr_failed_migrations_affine);
                P(se.statistics.nr_failed_migrations_running);
                P(se.statistics.nr_failed_migrations_hot);
                P(se.statistics.nr_forced_migrations);
                P(se.statistics.nr_wakeups);
                P(se.statistics.nr_wakeups_sync);
                P(se.statistics.nr_wakeups_migrate);
                P(se.statistics.nr_wakeups_local);
                P(se.statistics.nr_wakeups_remote);
                P(se.statistics.nr_wakeups_affine);
                P(se.statistics.nr_wakeups_affine_attempts);
                P(se.statistics.nr_wakeups_passive);
                P(se.statistics.nr_wakeups_idle);

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);
        }
#endif
        __P(nr_switches);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_voluntary_switches", (long long)p->nvcsw);
        SEQ_printf(m, "%-45s:%21Ld\n",
                   "nr_involuntary_switches", (long long)p->nivcsw);

        P(se.load.weight);
#ifdef CONFIG_SMP
        P(se.avg.load_sum);
        P(se.avg.util_sum);
        P(se.avg.load_avg);
        P(se.avg.util_avg);
        P(se.avg.last_update_time);
#endif
        P(policy);
        P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                SEQ_printf(m, "%-45s:%21Ld\n",
                           "clock-delta", (long long)(t1-t0));
        }

        sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}