/* linux/kernel/sched/stats.c */

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

#include "sched.h"

/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15

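/*
 * Emit /proc/schedstat: the "version"/"timestamp" header for the header
 * position (v == 1), otherwise one "cpu<N> ..." line of runqueue counters
 * for the cpu encoded in v, followed on SMP by one "domain<N> ..." line per
 * sched_domain attached to that cpu.
 */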
static int show_schedstat(struct seq_file *seq, void *v)
{
        int cpu;
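        /*
         * cpumask_scnprintf() formats the mask as comma-separated 32-bit
         * hex words: up to 8 hex digits plus a separator per word, hence
         * 9 bytes for every 32 possible cpus.
         */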
        int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
        char *mask_str = kmalloc(mask_len, GFP_KERNEL);

        if (mask_str == NULL)
                return -ENOMEM;

        if (v == (void *)1) {
                seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
                seq_printf(seq, "timestamp %lu\n", jiffies);
        } else {
                struct rq *rq;
#ifdef CONFIG_SMP
                struct sched_domain *sd;
                int dcount = 0;
#endif
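                /* the iterator encodes cpu N as the pointer value N + 2 */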
                cpu = (unsigned long)(v - 2);
                rq = cpu_rq(cpu);

                /* runqueue-specific stats */
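                /* the "0" below is a legacy field, always zero in version 15 */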
                seq_printf(seq,
                    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
                    cpu, rq->yld_count,
                    rq->sched_count, rq->sched_goidle,
                    rq->ttwu_count, rq->ttwu_local,
                    rq->rq_cpu_time,
                    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

                seq_printf(seq, "\n");

#ifdef CONFIG_SMP
                /* domain-specific stats */
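                /* sched domains are freed via RCU, so walk them under the read lock */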
                rcu_read_lock();
                for_each_domain(cpu, sd) {
                        enum cpu_idle_type itype;

                        cpumask_scnprintf(mask_str, mask_len,
                                          sched_domain_span(sd));
                        seq_printf(seq, "domain%d %s", dcount++, mask_str);
                        for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
                                        itype++) {
                                seq_printf(seq, " %u %u %u %u %u %u %u %u",
                                    sd->lb_count[itype],
                                    sd->lb_balanced[itype],
                                    sd->lb_failed[itype],
                                    sd->lb_imbalance[itype],
                                    sd->lb_gained[itype],
                                    sd->lb_hot_gained[itype],
                                    sd->lb_nobusyq[itype],
                                    sd->lb_nobusyg[itype]);
                        }
                        seq_printf(seq,
                                   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
                            sd->alb_count, sd->alb_failed, sd->alb_pushed,
                            sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
                            sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
                            sd->ttwu_wake_remote, sd->ttwu_move_affine,
                            sd->ttwu_move_balance);
                }
                rcu_read_unlock();
#endif
        }
        kfree(mask_str);
        return 0;
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

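        /*
         * n == 0 means we are right after the header: start from the first
         * online cpu.  Otherwise n - 1 is the last cpu shown, so advance to
         * the next online cpu after it.
         */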
        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

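        /*
         * cpumask_next()/cpumask_first() return >= nr_cpu_ids once the mask
         * is exhausted; returning NULL then ends the sequence.
         */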
        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);
        return NULL;
}

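/*
 * seq_file ->next(): bump the offset and let schedstat_start() translate the
 * new offset into the next online cpu (or NULL at the end).
 */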
static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
        .start = schedstat_start,
        .next  = schedstat_next,
        .stop  = schedstat_stop,
        .show  = show_schedstat,
};

static int schedstat_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &schedstat_sops);
}

static const struct file_operations proc_schedstat_operations = {
        .open    = schedstat_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

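/*
 * Register /proc/schedstat at boot; reads are served by the seq_file
 * iterator above.
 */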
static int __init proc_schedstat_init(void)
{
        proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
        return 0;
}
module_init(proc_schedstat_init);