/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);
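
/*
 * This allows printing both to /proc/sched_debug and to the console (when
 * no seq_file is passed, e.g. from the sysrq handler):
 */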
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
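
/*
 * Ease the printing of nsec fields: SPLIT_NS() feeds a "%Ld.%06ld" format
 * pair, so e.g. t = 1234567890 (ns) prints as "1234.567890", i.e. whole
 * milliseconds followed by the six-digit sub-millisecond remainder.
 */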
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
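
/*
 * Show the feature mask: enabled features print as their name, disabled
 * ones with a "NO_" prefix, all on one line.
 */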
static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif
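
/*
 * Flip a single feature: @cmp is one name from features.h, optionally
 * prefixed with "NO_" to clear it. Both the sysctl bitmask and the
 * corresponding static key are updated.
 */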
static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
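
/*
 * Toggling happens by writing a feature name to the debugfs file, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug and GENTLE_FAIR_SLEEPERS
 * is one of the names defined in features.h):
 *
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 */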
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);
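
	/* Ensure the static_key remains in a consistent state: */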
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;
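
	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */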
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0] , "min_interval",        &sd->min_interval,        sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1] , "max_interval",        &sd->max_interval,        sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2] , "busy_idx",            &sd->busy_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
	set_table_entry(&table[3] , "idle_idx",            &sd->idle_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
	set_table_entry(&table[4] , "newidle_idx",         &sd->newidle_idx,         sizeof(int) , 0644, proc_dointvec_minmax,   true );
	set_table_entry(&table[5] , "wake_idx",            &sd->wake_idx,            sizeof(int) , 0644, proc_dointvec_minmax,   true );
	set_table_entry(&table[6] , "forkexec_idx",        &sd->forkexec_idx,        sizeof(int) , 0644, proc_dointvec_minmax,   true );
	set_table_entry(&table[7] , "busy_factor",         &sd->busy_factor,         sizeof(int) , 0644, proc_dointvec_minmax,   false);
	set_table_entry(&table[8] , "imbalance_pct",       &sd->imbalance_pct,       sizeof(int) , 0644, proc_dointvec_minmax,   false);
	set_table_entry(&table[9] , "cache_nice_tries",    &sd->cache_nice_tries,    sizeof(int) , 0644, proc_dointvec_minmax,   false);
	set_table_entry(&table[10], "flags",               &sd->flags,               sizeof(int) , 0644, proc_dointvec_minmax,   false);
	set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name",                sd->name,            CORENAME_MAX_SIZE, 0444, proc_dostring,          false);
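	/* &table[13] is the zeroed terminator (14 entries were allocated) */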

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static cpumask_var_t sd_sysctl_cpus;
static struct ctl_table_header *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;
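
		/* deal with sparse possible map */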
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
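		/* init to possible to not have holes in @cpu_entries */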
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}
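
/* may be called multiple times per register */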
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
	P(se->runnable_weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_load_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

	return group_path;
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S           task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->avg.runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
			cfs_rq->removed.runnable_sum);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}
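
/*
 * This iterator needs some explanation: the position cookie encodes what to
 * show. Offset 0 maps to (void *)1, which makes sched_debug_show() print the
 * header; every later offset maps to an online CPU number + 2, so that
 * "cpu = v - 2" in sched_debug_show() recovers the CPU to print.
 */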
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F) SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
			  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
		   get_nr_threads(p));
	SEQ_printf(m,
		   "---------------------------------------------------------"
		   "----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(se.runnable_weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	P(se.avg.util_est.enqueued);
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P
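
	/*
	 * Two back-to-back cpu_clock() reads; the printed delta gives a
	 * rough lower bound on the local clock's overhead/granularity.
	 */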
	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}
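
/* Reset the schedstats (e.g. on a write to /proc/<pid>/sched). */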
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}