/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>
#include <linux/frame.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

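/*
 * Advance rq->clock by the time that elapsed since the last update,
 * unless a clock update was explicitly skipped via RQCF_ACT_SKIP.
 */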
void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_skip_update & RQCF_ACT_SKIP)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/* cpus with isolated domains */
cpumask_var_t cpu_isolated_map;

/*
 * this_rq_lock - lock this runqueue and disable interrupts
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
	_old;								\
})
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * its already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * This cmpxchg() implies a full barrier, which pairs with the write
	 * barrier implied by the wakeup in wake_up_q().
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	get_task_struct(task);

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
}

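/*
 * Drain a wake_q: wake up every task queued on it and drop the task
 * reference that wake_q_add() took.
 */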
void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* task can safely be re-inserted now */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do similar optimization for completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id();
	struct sched_domain *sd;

	if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
		return cpu;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			/* The candidate @i must itself be a housekeeping cpu. */
			if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (!is_housekeeping_cpu(cpu))
		cpu = housekeeping_any_cpu();
unlock:
	rcu_read_unlock();
	return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there are more than one RR tasks, we need the tick to effect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
	 * forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
	 * if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */

void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (idle_policy(p->policy)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(sched_prio_to_weight[prio]);
	load->inv_weight = sched_prio_to_wmult[prio];
}

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	if (!(flags & ENQUEUE_RESTORE))
		sched_info_queued(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	if (!(flags & DEQUEUE_SAVE))
		sched_info_dequeued(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compile should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight miss-attribution of {soft,}irq
	 * time, a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, its something
		 * userspace knows about and won't get confused about.
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	p->on_rq = TASK_ON_RQ_MIGRATING;
	dequeue_task(rq, p, 0);
	set_task_cpu(p, new_cpu);
	raw_spin_unlock(&rq->lock);

	rq = cpu_rq(new_cpu);

	raw_spin_lock(&rq->lock);
	BUG_ON(task_cpu(p) != new_cpu);
	enqueue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_QUEUED;
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
	if (unlikely(!cpu_active(dest_cpu)))
		return rq;

	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		return rq;

	rq = move_queued_task(rq, p, dest_cpu);

	return rq;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_allowed
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	sched_ttwu_pending();

	raw_spin_lock(&p->pi_lock);
	raw_spin_lock(&rq->lock);
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq && task_on_rq_queued(p))
		rq = __migrate_task(rq, p, arg->dest_cpu);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock(&p->pi_lock);

	local_irq_enable();
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below to update ->cpus_allowed:
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_held(&rq->lock);
		dequeue_task(rq, p, DEQUEUE_SAVE);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, new_mask);

	if (running)
		p->sched_class->set_curr_task(rq);
	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	unsigned long flags;
	struct rq *rq;
	unsigned int dest_cpu;
	int ret = 0;

	rq = task_rq_lock(p, &flags);

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &flags);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	} else if (task_on_rq_queued(p)) {
		/*
		 * OK, since we're going to drop the lock immediately
		 * afterwards anyway.
		 */
		lockdep_unpin_lock(&rq->lock);
		rq = move_queued_task(rq, p, dest_cpu);
		lockdep_pin_lock(&rq->lock);
	}
out:
	task_rq_unlock(rq, p, &flags);

	return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!p->on_rq);

	/*
	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
	 * time relying on p->on_rq.
	 */
	WARN_ON_ONCE(p->state == TASK_RUNNING &&
		     p->sched_class == &fair_sched_class &&
		     (p->on_rq && !task_on_rq_migrating(p)));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p);
		p->se.nr_migrations++;
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}

static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		p->on_rq = TASK_ON_RQ_MIGRATING;
		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		p->on_rq = TASK_ON_RQ_QUEUED;
		check_preempt_curr(dst_rq, p, 0);
	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous cpu our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
		return -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);

	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = task_cpu(cur),
		.dst_task = p,
		.dst_cpu = task_cpu(p),
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change.  If it changes, i.e. @p might have woken up,
 * then return zero.  When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count).  If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, queued;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/**
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_online(dest_cpu))
				continue;
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		/* No more Mr. Nice Guy. */
		switch (state) {
		case cpuset:
			if (IS_ENABLED(CONFIG_CPUSETS)) {
				cpuset_cpus_allowed_fallback(p);
				state = possible;
				break;
			}
			/* fall-through */
		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (p->nr_cpus_allowed > 1)
		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

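/*
 * Exponentially-weighted moving average with a 1/8 weight for each new
 * sample; used below to track rq->avg_idle.
 */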
static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}

#else /* CONFIG_SMP */

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
					 const struct cpumask *new_mask, bool check)
{
	return set_cpus_allowed_ptr(p, new_mask);
}

#endif /* CONFIG_SMP */

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = TASK_ON_RQ_QUEUED;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}

/*
 * Mark the task runnable and perform wakeup-preemption.
 */
static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	check_preempt_curr(rq, p, wake_flags);
	p->state = TASK_RUNNING;
	trace_sched_wakeup(p);

#ifdef CONFIG_SMP
	if (p->sched_class->task_woken) {
		/*
		 * Our task @p is fully woken up and running; so its safe to
		 * drop the rq->lock, hereafter rq is only used for statistics.
		 */
		lockdep_unpin_lock(&rq->lock);
		p->sched_class->task_woken(rq, p);
		lockdep_pin_lock(&rq->lock);
	}

	if (rq->idle_stamp) {
		u64 delta = rq_clock(rq) - rq->idle_stamp;
		u64 max = 2*rq->max_idle_balance_cost;

		update_avg(&rq->avg_idle, delta);

		if (rq->avg_idle > max)
			rq->avg_idle = max;

		rq->idle_stamp = 0;
	}
#endif
}

static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
{
	lockdep_assert_held(&rq->lock);

#ifdef CONFIG_SMP
	if (p->sched_contributes_to_load)
		rq->nr_uninterruptible--;
#endif

	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
	ttwu_do_wakeup(rq, p, wake_flags);
}

/*
 * Called in case the task @p isn't fully descheduled from its runqueue,
 * in this case we must do a remote wakeup. Its a 'light' wakeup though,
 * since all we need to do is flip p->state to TASK_RUNNING, since
 * the task is still ->on_rq.
 */
static int ttwu_remote(struct task_struct *p, int wake_flags)
{
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p);
	if (task_on_rq_queued(p)) {
		/* check_preempt_curr() may use rq clock */
		update_rq_clock(rq);
		ttwu_do_wakeup(rq, p, wake_flags);
		ret = 1;
	}
	__task_rq_unlock(rq);

	return ret;
}

#ifdef CONFIG_SMP
void sched_ttwu_pending(void)
{
	struct rq *rq = this_rq();
	struct llist_node *llist = llist_del_all(&rq->wake_list);
	struct task_struct *p;
	unsigned long flags;

	if (!llist)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	lockdep_pin_lock(&rq->lock);

	while (llist) {
		p = llist_entry(llist, struct task_struct, wake_entry);
		llist = llist_next(llist);
		ttwu_do_activate(rq, p, 0);
	}

	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();

	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
		return;

	/*
	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
	 * traditionally all their work was done from the interrupt return
	 * path. Now that we actually do some work, we need to make sure
	 * we do call them.
	 *
	 * Some archs already do call them, luckily irq_enter/exit nest
	 * properly.
	 *
	 * Arguably we should visit all archs and update all handlers,
	 * however a fair share of IPIs are still resched only so this would
	 * somewhat pessimize the simple resched case.
	 */
	irq_enter();
	sched_ttwu_pending();

	/*
	 * Check if someone kicked us for doing the nohz idle load balance.
	 */
	if (unlikely(got_nohz_idle_kick())) {
		this_rq()->idle_balance = 1;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
	irq_exit();
}

static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
		if (!set_nr_if_polling(rq->idle))
			smp_send_reschedule(cpu);
		else
			trace_sched_wake_idle_without_ipi(cpu);
	}
}

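/*
 * Wake the idle task of @cpu, either by setting TIF_NEED_RESCHED for a
 * polling idle loop or by sending a reschedule IPI.
 */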
void wake_up_if_idle(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	rcu_read_lock();

	if (!is_idle_task(rcu_dereference(rq->curr)))
		goto out;

	if (set_nr_if_polling(rq->idle)) {
		trace_sched_wake_idle_without_ipi(cpu);
	} else {
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (is_idle_task(rq->curr))
			smp_send_reschedule(cpu);
		/* Else cpu is not in idle, do nothing here */
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

out:
	rcu_read_unlock();
}

bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
#endif /* CONFIG_SMP */

static void ttwu_queue(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#if defined(CONFIG_SMP)
	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		sched_clock_cpu(cpu); /* sync clocks x-cpu */
		ttwu_queue_remote(p, cpu);
		return;
	}
#endif

	raw_spin_lock(&rq->lock);
	lockdep_pin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock(&rq->lock);
}

/*
 * Notes on Program-Order guarantees on SMP systems.
 *
 *  MIGRATION
 *
 * The basic program-order guarantee on SMP systems is that when a task [t]
 * migrates, all its activity on its old cpu [c0] happens-before any subsequent
 * execution on its new cpu [c1].
 *
 * For migration (of runnable tasks) this is provided by the following means:
 *
 *  A) UNLOCK of the rq(c0)->lock scheduling out task t
 *  B) migration for t is required to synchronize *both* rq(c0)->lock and
 *     rq(c1)->lock (if not at the same time, then in that order).
 *  C) LOCK of the rq(c1)->lock scheduling in task
 *
 * Transitivity guarantees that B happens after A and C after B.
 * Note: we only require RCpc transitivity.
 * Note: the cpu doing B need not be c0 or c1.
 *
 *  BLOCKING -- aka. SLEEP + WAKEUP
 *
 * For blocking we (obviously) need to provide the same guarantee as for
 * migration. However the means are completely different as there is no lock
 * chain to provide order. Instead we do:
 *
 *   1) smp_store_release(X->on_cpu, 0)
 *   2) smp_cond_acquire(!X->on_cpu)
 *
 * The store-release on ->on_cpu (in finish_lock_switch()) orders all prior
 * activity of the task on its old cpu against the acquire done by the waking
 * cpu (in try_to_wake_up()), such that everything the task did before it
 * blocked happens-before everything it does after the wakeup.
 *
 * This means that any means of doing remote wakeups must order the CPU doing
 * the wakeup against the CPU the task is going to end up running on. This,
 * however, is already required for the regular Program-Order guarantee above,
 * since the waking CPU is the one issueing the ACQUIRE (smp_cond_acquire).
 */

/**
 * try_to_wake_up - wake up a thread
 * @p: the thread to be awakened
 * @state: the mask of task states that can be woken
 * @wake_flags: wake modifier flags (WF_*)
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * Return: %true if @p was woken up, %false if it was already running
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int cpu, success = 0;

	/*
	 * If we are going to wake up a thread waiting for CONDITION we
	 * need to ensure that CONDITION=1 done by the caller can not be
	 * reordered with p->state check below. This pairs with mb() in
	 * set_current_state() the waiting thread does.
	 */
	smp_mb__before_spinlock();
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!(p->state & state))
		goto out;

	trace_sched_waking(p);

	success = 1; /* we're going to change ->state */
	cpu = task_cpu(p);

	if (p->on_rq && ttwu_remote(p, wake_flags))
		goto stat;

#ifdef CONFIG_SMP
	/*
	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
	 * in smp_cond_acquire() below.
	 *
	 * Pairs with the UNLOCK+LOCK on rq->lock from the last wakeup
	 * of our task and the schedule() that made it current.
	 */
	smp_rmb();

	/*
	 * If the owning (remote) cpu is still in the middle of schedule() with
	 * this task as prev, wait until its done referencing the task.
	 *
	 * Pairs with the smp_store_release() in finish_lock_switch().
	 *
	 * This ensures that tasks getting woken will be fully ordered against
	 * their previous state and preserve Program Order.
	 */
	smp_cond_acquire(!p->on_cpu);

	p->sched_contributes_to_load = !!task_contributes_to_load(p);
	p->state = TASK_WAKING;

	if (p->sched_class->task_waking)
		p->sched_class->task_waking(p);

	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
	if (task_cpu(p) != cpu) {
		wake_flags |= WF_MIGRATED;
		set_task_cpu(p, cpu);
	}
#endif /* CONFIG_SMP */

	ttwu_queue(p, cpu);
stat:
	if (schedstat_enabled())
		ttwu_stat(p, cpu, wake_flags);
out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	return success;
}

/**
 * try_to_wake_up_local - try to wake up a local task with rq lock held
 * @p: the thread to be awakened
 *
 * Put @p on the run-queue if it's not already there. The caller must
 * ensure that this_rq() is locked, @p is bound to this_rq() and not
 * the current task.
 */
static void try_to_wake_up_local(struct task_struct *p)
{
	struct rq *rq = task_rq(p);

	if (WARN_ON_ONCE(rq != this_rq()) ||
	    WARN_ON_ONCE(p == current))
		return;

	lockdep_assert_held(&rq->lock);

	if (!raw_spin_trylock(&p->pi_lock)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it, so drop
		 * the rq->lock and re-take both locks in the right order.
		 */
		lockdep_unpin_lock(&rq->lock);
		raw_spin_unlock(&rq->lock);
		raw_spin_lock(&p->pi_lock);
		raw_spin_lock(&rq->lock);
		lockdep_pin_lock(&rq->lock);
	}

	if (!(p->state & TASK_NORMAL))
		goto out;

	trace_sched_waking(p);

	if (!task_on_rq_queued(p))
		ttwu_activate(rq, p, ENQUEUE_WAKEUP);

	ttwu_do_wakeup(rq, p, 0);
	if (schedstat_enabled())
		ttwu_stat(p, smp_processor_id(), 0);
out:
	raw_spin_unlock(&p->pi_lock);
}

/**
 * wake_up_process - Wake up a specific process
 * @p: The process to be woken up.
 *
 * Attempt to wake up the nominated process and move it to the set of runnable
 * processes.
 *
 * Return: 1 if the process was woken up, 0 if it was already running.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
int wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_NORMAL, 0);
}
EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}

/*
 * This function clears the sched_dl_entity static params.
 */
void __dl_clear_params(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = 0;
	dl_se->dl_deadline = 0;
	dl_se->dl_period = 0;
	dl_se->flags = 0;
	dl_se->dl_bw = 0;

	dl_se->dl_throttled = 0;
	dl_se->dl_yielded = 0;
}

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	p->on_rq = 0;

	p->se.on_rq = 0;
	p->se.exec_start = 0;
	p->se.sum_exec_runtime = 0;
	p->se.prev_sum_exec_runtime = 0;
	p->se.nr_migrations = 0;
	p->se.vruntime = 0;
	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = NULL;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* Even if schedstat is disabled, there should not be garbage */
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif

	RB_CLEAR_NODE(&p->dl.rb_node);
	init_dl_task_timer(&p->dl);
	__dl_clear_params(p);

	INIT_LIST_HEAD(&p->rt.run_list);
	p->rt.timeout = 0;
	p->rt.time_slice = sched_rr_timeslice;
	p->rt.on_rq = 0;
	p->rt.on_list = 0;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

#ifdef CONFIG_NUMA_BALANCING
	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
		p->mm->numa_scan_seq = 0;
	}

	if (clone_flags & CLONE_VM)
		p->numa_preferred_nid = current->numa_preferred_nid;
	else
		p->numa_preferred_nid = -1;

	p->node_stamp = 0ULL;
	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	p->numa_work.next = &p->numa_work;
	p->numa_faults = NULL;
	p->last_task_numa_placement = 0;
	p->last_sum_exec_runtime = 0;

	p->numa_group = NULL;
#endif /* CONFIG_NUMA_BALANCING */
}

DEFINE_STATIC_KEY_FALSE(sched_numa_balancing);

#ifdef CONFIG_NUMA_BALANCING

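/* Runtime toggle for the NUMA-balancing static branch. */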
void set_numabalancing_state(bool enabled)
{
	if (enabled)
		static_branch_enable(&sched_numa_balancing);
	else
		static_branch_disable(&sched_numa_balancing);
}

#ifdef CONFIG_PROC_SYSCTL
int sysctl_numa_balancing(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int err;
	int state = static_branch_likely(&sched_numa_balancing);

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_numabalancing_state(state);
	return err;
}
#endif
#endif

DEFINE_STATIC_KEY_FALSE(sched_schedstats);

#ifdef CONFIG_SCHEDSTATS
static void set_schedstats(bool enabled)
{
	if (enabled)
		static_branch_enable(&sched_schedstats);
	else
		static_branch_disable(&sched_schedstats);
}

void force_schedstat_enabled(void)
{
	if (!schedstat_enabled()) {
		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
		static_branch_enable(&sched_schedstats);
	}
}

static int __init setup_schedstats(char *str)
{
	int ret = 0;
	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		set_schedstats(true);
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		set_schedstats(false);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse schedstats=\n");

	return ret;
}
__setup("schedstats=", setup_schedstats);

#ifdef CONFIG_PROC_SYSCTL
int sysctl_schedstats(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int err;
	int state = static_branch_likely(&sched_schedstats);

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_schedstats(state);
	return err;
}
#endif
#endif

/*
 * fork()/clone()-time setup:
 */
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long flags;
	int cpu = get_cpu();

	__sched_fork(clone_flags, p);
	/*
	 * We mark the process as running here. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;

	/*
	 * Make sure we do not leak PI boosting priority to the child.
	 */
	p->prio = current->normal_prio;

	/*
	 * Revert to default priority/policy on fork if requested.
	 */
	if (unlikely(p->sched_reset_on_fork)) {
		if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
			p->policy = SCHED_NORMAL;
			p->static_prio = NICE_TO_PRIO(0);
			p->rt_priority = 0;
		} else if (PRIO_TO_NICE(p->static_prio) < 0)
			p->static_prio = NICE_TO_PRIO(0);

		p->prio = p->normal_prio = __normal_prio(p);
		set_load_weight(p);

		/*
		 * We don't need the reset flag anymore after the fork. It has
		 * fulfilled its duty:
		 */
		p->sched_reset_on_fork = 0;
	}

	if (dl_prio(p->prio)) {
		put_cpu();
		return -EAGAIN;
	} else if (rt_prio(p->prio)) {
		p->sched_class = &rt_sched_class;
	} else {
		p->sched_class = &fair_sched_class;
	}

	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);

	/*
	 * The child is not yet in the pid-hash so no cgroup attach races,
	 * and the cgroup is pinned to this child due to cgroup_fork()
	 * is ran before sched_fork().
	 *
	 * Silence PROVE_RCU.
	 */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	set_task_cpu(p, cpu);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

#ifdef CONFIG_SCHED_INFO
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP)
	p->on_cpu = 0;
#endif
	init_task_preempt_count(p);
#ifdef CONFIG_SMP
	plist_node_init(&p->pushable_tasks, MAX_PRIO);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif

	put_cpu();
	return 0;
}

unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	/*
	 * Doing this here saves a lot of checks in all
	 * the calling paths, and returning zero seems
	 * safe for them anyway.
	 */
	if (period == 0)
		return 0;

	return div64_u64(runtime << 20, period);
}

#ifdef CONFIG_SMP
inline struct dl_bw *dl_bw_of(int i)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}
#else
inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}
#endif

/*
 * We must be sure that accepting a new task (or allowing changing the
 * parameters of an existing one) is consistent with the bandwidth
 * constraints. If yes, this function also accordingly updates the currently
 * allocated bandwidth to reflect the new situation.
 *
 * This function is called while holding p's rq->lock.
 *
 * XXX we should delay bw change until the task's 0-lag point, see
 * __setparam_dl().
 */
static int dl_overflow(struct task_struct *p, int policy,
		       const struct sched_attr *attr)
{
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
	u64 period = attr->sched_period ?: attr->sched_deadline;
	u64 runtime = attr->sched_runtime;
	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
	int cpus, err = -1;

	if (new_bw == p->dl.dl_bw)
		return 0;

	/*
	 * Either if a task, enters, leave, or stays -deadline but changes
	 * its parameters, we may need to update accordingly the total
	 * allocated bandwidth of the root_domain.
	 */
	raw_spin_lock(&dl_b->lock);
	cpus = dl_bw_cpus(task_cpu(p));
	if (dl_policy(policy) && !task_has_dl_policy(p) &&
	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
		__dl_add(dl_b, new_bw);
		err = 0;
	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
		__dl_clear(dl_b, p->dl.dl_bw);
		__dl_add(dl_b, new_bw);
		err = 0;
	} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
		__dl_clear(dl_b, p->dl.dl_bw);
		err = 0;
	}
	raw_spin_unlock(&dl_b->lock);

	return err;
}

extern void init_dl_bw(struct dl_bw *dl_b);

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	/* Initialize new task's runnable average */
	init_entity_runnable_average(&p->se);
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 */
	set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif

	rq = __task_rq_lock(p);
	activate_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_QUEUED;
	trace_sched_wakeup_new(p);
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken) {
		/*
		 * Nothing relies on rq->lock after this, so its fine to
		 * drop it.
		 */
		lockdep_unpin_lock(&rq->lock);
		p->sched_class->task_woken(rq, p);
		lockdep_pin_lock(&rq->lock);
	}
#endif
	task_rq_unlock(rq, p, &flags);
}

#ifdef CONFIG_PREEMPT_NOTIFIERS

static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;

void preempt_notifier_inc(void)
{
	static_key_slow_inc(&preempt_notifier_key);
}
EXPORT_SYMBOL_GPL(preempt_notifier_inc);

void preempt_notifier_dec(void)
{
	static_key_slow_dec(&preempt_notifier_key);
}
EXPORT_SYMBOL_GPL(preempt_notifier_dec);

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	if (!static_key_false(&preempt_notifier_key))
		WARN(1, "registering preempt_notifier while notifiers disabled\n");

	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is *not* safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	if (static_key_false(&preempt_notifier_key))
		__fire_sched_in_preempt_notifiers(curr);
}

static void
__fire_sched_out_preempt_notifiers(struct task_struct *curr,
				   struct task_struct *next)
{
	struct preempt_notifier *notifier;

	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

static __always_inline void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	if (static_key_false(&preempt_notifier_key))
		__fire_sched_out_preempt_notifiers(curr, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static inline void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */

/**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @prev: the current task that is being switched out
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	sched_info_switch(rq, prev, next);
	perf_event_task_sched_out(prev, next);
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
}

/**
 * finish_task_switch - clean up after a task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 *
 * The context switch have flipped the stack from under us and restored the
 * local variables which were saved when this task called schedule() in the
 * past. prev == current is still correct but we need to recalculate this_rq
 * because prev may have moved to another CPU.
 */
static struct rq *finish_task_switch(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq = this_rq();
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	/*
	 * The previous task will have left us with a preempt_count of 2
	 * because it left us after:
	 *
	 *	schedule()
	 *	  preempt_disable();			// 1
	 *	  __schedule()
	 *	    raw_spin_lock_irq(&rq->lock)	// 2
	 *
	 * Also, see FORK_PREEMPT_COUNT.
	 */
	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
		      "corrupted preempt_count: %s/%d/0x%x\n",
		      current->comm, current->pid, preempt_count()))
		preempt_count_set(FORK_PREEMPT_COUNT);

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 *
	 * We must observe prev->state before clearing prev->on_cpu (in
	 * finish_lock_switch), otherwise a concurrent wakeup can get prev
	 * running on another CPU and we could observe its RUNNING -> DEAD
	 * transition incorrectly.
	 */
	prev_state = prev->state;
	vtime_task_switch(prev);
	perf_event_task_sched_in(prev, current);
	finish_lock_switch(rq, prev);
	finish_arch_post_lock_switch();

	fire_sched_in_preempt_notifiers(current);
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		if (prev->sched_class->task_dead)
			prev->sched_class->task_dead(prev);

		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);
		put_task_struct(prev);
	}

	tick_nohz_task_switch();
	return rq;
}

#ifdef CONFIG_SMP

/* rq->lock is NOT held, but preemption is disabled */
static void __balance_callback(struct rq *rq)
{
	struct callback_head *head, *next;
	void (*func)(struct rq *rq);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	head = rq->balance_callback;
	rq->balance_callback = NULL;
	while (head) {
		func = (void (*)(struct rq *))head->func;
		next = head->next;
		head->next = NULL;
		head = next;

		func(rq);
	}
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static inline void balance_callback(struct rq *rq)
{
	if (unlikely(rq->balance_callback))
		__balance_callback(rq);
}

#else

static inline void balance_callback(struct rq *rq)
{
}

#endif

/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage __visible void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq;

	/*
	 * New tasks start with FORK_PREEMPT_COUNT, see there and
	 * finish_task_switch() for details.
	 *
	 * finish_task_switch() will drop rq->lock() and lower preempt_count
	 * and the preempt_enable() will end up enabling preemption (on
	 * PREEMPT_COUNT kernels).
	 */
	rq = finish_task_switch(prev);
	balance_callback(rq);
	preempt_enable();

	if (current->set_child_tid)
		put_user(task_pid_vnr(current), current->set_child_tid);
}

/*
 * context_switch - switch to the new MM and the new thread's register state.
 */
static __always_inline struct rq *
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);

	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_start_context_switch(prev);

	if (!mm) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (!prev->mm) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
	lockdep_unpin_lock(&rq->lock);
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);
	barrier();

	return finish_task_switch(prev);
}

/*
 * nr_running and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, total number of context switches performed since bootup.
 */
unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_running;

	return sum;
}

/*
 * Check if only the current task is running on the cpu.
 *
 * Caution: this function does not check that the caller has disabled
 * preemption, thus the result might have a time-of-check-to-time-of-use
 * race.  The caller is responsible to use it correctly, for example:
 *
 * - from a non-preemptable section (of course)
 *
 * - from a thread that is bound to a single CPU
 *
 * - in a loop with very short iterations (e.g. a polling flag)
 */
bool single_task_running(void)
{
	return raw_rq()->nr_running == 1;
}
EXPORT_SYMBOL(single_task_running);
unsigned long long nr_context_switches(void)
{
	int i;
	unsigned long long sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_switches;

	return sum;
}

unsigned long nr_iowait(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += atomic_read(&cpu_rq(i)->nr_iowait);

	return sum;
}

unsigned long nr_iowait_cpu(int cpu)
{
	struct rq *this = cpu_rq(cpu);
	return atomic_read(&this->nr_iowait);
}

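/*
 * Report this CPU's nr_iowait count and runqueue load weight in a
 * single call.
 */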
void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
{
	struct rq *rq = this_rq();
	*nr_waiters = atomic_read(&rq->nr_iowait);
	*load = rq->load.weight;
}

#ifdef CONFIG_SMP

/*
 * sched_exec - execve() is a valuable balancing opportunity, because at
 * this point the task has the smallest effective memory and cache footprint.
 */
void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

#endif

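/*
 * Per-CPU kernel and cputime statistics, exported for the accounting
 * code.
 */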
DEFINE_PER_CPU(struct kernel_stat, kstat);
DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

EXPORT_PER_CPU_SYMBOL(kstat);
EXPORT_PER_CPU_SYMBOL(kernel_cpustat);

/*
 * Return accounted runtime for the task.
 * In case the task is currently running, return the runtime plus current's
 * pending runtime that have not been accounted yet.
 */
unsigned long long task_sched_runtime(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	u64 ns;

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	/*
	 * 64-bit doesn't need locks to atomically read a 64bit value.
	 * So we have a optimization chance when the task's delta_exec is 0.
	 * Reading ->on_cpu is racy, but this is ok.
	 *
	 * If we race with it leaving cpu, we'll take a lock. So we're correct.
	 * If we race with it entering cpu, unaccounted time is 0. This is
	 * indistinguishable from the read occurring a few cycles earlier.
	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
	 * been accounted, so we're correct here as well.
	 */
	if (!p->on_cpu || !task_on_rq_queued(p))
		return p->se.sum_exec_runtime;
#endif

	rq = task_rq_lock(p, &flags);
	/*
	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
	 * project cycles that may never be accounted to this
	 * thread, breaking clock_gettime().
	 */
	if (task_current(rq, p) && task_on_rq_queued(p)) {
		update_rq_clock(rq);
		p->sched_class->update_curr(rq);
	}
	ns = p->se.sum_exec_runtime;
	task_rq_unlock(rq, p, &flags);

	return ns;
}

/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
 */
void scheduler_tick(void)
{
	int cpu = smp_processor_id();
	struct rq *rq = cpu_rq(cpu);
	struct task_struct *curr = rq->curr;

	sched_clock_tick();

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	curr->sched_class->task_tick(rq, curr, 0);
	update_cpu_load_active(rq);
	calc_global_load_tick(rq);
	raw_spin_unlock(&rq->lock);

	perf_event_task_tick();

#ifdef CONFIG_SMP
	rq->idle_balance = idle_cpu(cpu);
	trigger_load_balance(rq);
#endif
	rq_last_tick_reset(rq);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * scheduler_tick_max_deferment
 *
 * Keep at least one tick per second when a single
 * active task is running because the scheduler doesn't
 * yet completely support full dynticks environment.
 *
 * This makes sure that uptime, CFS vruntime, load
 * balancing, etc... continue to move forward, even
 * with a very low granularity.
 *
 * Return: Maximum deferment in nanoseconds.
 */
u64 scheduler_tick_max_deferment(void)
{
	struct rq *rq = this_rq();
	unsigned long next, now = READ_ONCE(jiffies);

	next = rq->last_sched_tick + HZ;

	if (time_before_eq(next, now))
		return 0;

	return jiffies_to_nsecs(next - now);
}
#endif

#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
				defined(CONFIG_PREEMPT_TRACER))

void preempt_count_add(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
		return;
#endif
	__preempt_count_add(val);
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Spinlock count overflowing soon?
	 */
	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
				PREEMPT_MASK - 10);
#endif
	if (preempt_count() == val) {
		unsigned long ip = get_lock_parent_ip();
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = ip;
#endif
		trace_preempt_off(CALLER_ADDR0, ip);
	}
}
EXPORT_SYMBOL(preempt_count_add);
NOKPROBE_SYMBOL(preempt_count_add);

void preempt_count_sub(int val)
{
#ifdef CONFIG_DEBUG_PREEMPT
	/*
	 * Underflow?
	 */
	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
		return;
	/*
	 * Is the spinlock portion underflowing?
	 */
	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
			!(preempt_count() & PREEMPT_MASK)))
		return;
#endif

	if (preempt_count() == val)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
	__preempt_count_sub(val);
}
EXPORT_SYMBOL(preempt_count_sub);
NOKPROBE_SYMBOL(preempt_count_sub);

#endif

/*
 * Print scheduling while atomic bug:
 */
static noinline void __schedule_bug(struct task_struct *prev)
{
	if (oops_in_progress)
		return;

	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
		prev->comm, prev->pid, preempt_count());

	debug_show_held_locks(prev);
	print_modules();
	if (irqs_disabled())
		print_irqtrace_events(prev);
#ifdef CONFIG_DEBUG_PREEMPT
	if (in_atomic_preempt_off()) {
		pr_err("Preemption disabled at:");
		print_ip_sym(current->preempt_disable_ip);
		pr_cont("\n");
	}
#endif
	dump_stack();
	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}

/*
 * Various schedule()-time debugging checks and statistics:
 */
static inline void schedule_debug(struct task_struct *prev)
{
#ifdef CONFIG_SCHED_STACK_END_CHECK
	BUG_ON(task_stack_end_corrupted(prev));
#endif

	if (unlikely(in_atomic_preempt_off())) {
		__schedule_bug(prev);
		preempt_count_set(PREEMPT_DISABLED);
	}
	rcu_sleep_check();

	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

	schedstat_inc(this_rq(), sched_count);
}
3063
3064
3065
3066
3067static inline struct task_struct *
3068pick_next_task(struct rq *rq, struct task_struct *prev)
3069{
3070 const struct sched_class *class = &fair_sched_class;
3071 struct task_struct *p;
3072
3073
3074
3075
3076
3077 if (likely(prev->sched_class == class &&
3078 rq->nr_running == rq->cfs.h_nr_running)) {
3079 p = fair_sched_class.pick_next_task(rq, prev);
3080 if (unlikely(p == RETRY_TASK))
3081 goto again;
3082
		/* assumes fair_sched_class->next == idle_sched_class */
3084 if (unlikely(!p))
3085 p = idle_sched_class.pick_next_task(rq, prev);
3086
3087 return p;
3088 }
3089
3090again:
3091 for_each_class(class) {
3092 p = class->pick_next_task(rq, prev);
3093 if (p) {
3094 if (unlikely(p == RETRY_TASK))
3095 goto again;
3096 return p;
3097 }
3098 }
3099
	BUG(); /* the idle class will always have a runnable task */
3101}
3102
/*
 * __schedule() is the main scheduler function.
 *
 * The main means of driving the scheduler and thus entering this function are:
 *
 *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
 *
 *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
 *      paths. For example, see arch/x86/entry_64.S.
 *
 *      To drive preemption between tasks, the scheduler sets the flag in timer
 *      interrupt handler scheduler_tick().
 *
 *   3. Wakeups don't really cause entry into schedule(). They add a
 *      task to the run-queue and that's it.
 *
 *      Now, if the new task added to the run-queue preempts the current
 *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
 *      called on the nearest possible occasion:
 *
 *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
 *
 *         - in syscall or exception context, at the next outmost
 *           preempt_enable(). (this might be as soon as the wake_up()'s
 *           spin_unlock()!)
 *
 *         - in IRQ context, return from interrupt-handler to
 *           preemptible context
 *
 *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
 *         then at the next:
 *
 *          - cond_resched() call
 *          - explicit schedule() call
 *          - return to userspace
 *          - return from interrupt
 *
 * WARNING: must be called with preemption disabled!
 */
3142static void __sched notrace __schedule(bool preempt)
3143{
3144 struct task_struct *prev, *next;
3145 unsigned long *switch_count;
3146 struct rq *rq;
3147 int cpu;
3148
3149 cpu = smp_processor_id();
3150 rq = cpu_rq(cpu);
3151 prev = rq->curr;
3152
	/*
	 * do_exit() calls schedule() with preemption disabled as an exception;
	 * however we must fix that up, otherwise the next task will see an
	 * inconsistent (higher) preempt count.
	 *
	 * It also avoids the below schedule_debug() test from complaining
	 * about this.
	 */
3161 if (unlikely(prev->state == TASK_DEAD))
3162 preempt_enable_no_resched_notrace();
3163
3164 schedule_debug(prev);
3165
3166 if (sched_feat(HRTICK))
3167 hrtick_clear(rq);
3168
3169 local_irq_disable();
3170 rcu_note_context_switch();
3171
	/*
	 * Make sure that signal_pending_state()->signal_pending() below
	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
	 * done by the caller to avoid the race with signal_wake_up().
	 */
3177 smp_mb__before_spinlock();
3178 raw_spin_lock(&rq->lock);
3179 lockdep_pin_lock(&rq->lock);
3180
	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
3182
3183 switch_count = &prev->nivcsw;
3184 if (!preempt && prev->state) {
3185 if (unlikely(signal_pending_state(prev->state, prev))) {
3186 prev->state = TASK_RUNNING;
3187 } else {
3188 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3189 prev->on_rq = 0;
3190
			/*
			 * If a worker went to sleep, notify and ask workqueue
			 * whether it wants to wake up a task to maintain
			 * concurrency.
			 */
3196 if (prev->flags & PF_WQ_WORKER) {
3197 struct task_struct *to_wakeup;
3198
3199 to_wakeup = wq_worker_sleeping(prev);
3200 if (to_wakeup)
3201 try_to_wake_up_local(to_wakeup);
3202 }
3203 }
3204 switch_count = &prev->nvcsw;
3205 }
3206
3207 if (task_on_rq_queued(prev))
3208 update_rq_clock(rq);
3209
3210 next = pick_next_task(rq, prev);
3211 clear_tsk_need_resched(prev);
3212 clear_preempt_need_resched();
3213 rq->clock_skip_update = 0;
3214
3215 if (likely(prev != next)) {
3216 rq->nr_switches++;
3217 rq->curr = next;
3218 ++*switch_count;
3219
3220 trace_sched_switch(preempt, prev, next);
3221 rq = context_switch(rq, prev, next);
3222 } else {
3223 lockdep_unpin_lock(&rq->lock);
3224 raw_spin_unlock_irq(&rq->lock);
3225 }
3226
3227 balance_callback(rq);
3228}
STACK_FRAME_NON_STANDARD(__schedule); /* switch_to() */
3230
3231static inline void sched_submit_work(struct task_struct *tsk)
3232{
3233 if (!tsk->state || tsk_is_pi_blocked(tsk))
3234 return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
3239 if (blk_needs_flush_plug(tsk))
3240 blk_schedule_flush_plug(tsk);
3241}
3242
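/*
 * schedule() is the main entry point into the scheduler proper: flush
 * any plugged block IO first, then keep calling __schedule() until
 * there is no pending need_resched left.
 */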
3243asmlinkage __visible void __sched schedule(void)
3244{
3245 struct task_struct *tsk = current;
3246
3247 sched_submit_work(tsk);
3248 do {
3249 preempt_disable();
3250 __schedule(false);
3251 sched_preempt_enable_no_resched();
3252 } while (need_resched());
3253}
3254EXPORT_SYMBOL(schedule);
3255
3256#ifdef CONFIG_CONTEXT_TRACKING
3257asmlinkage __visible void __sched schedule_user(void)
3258{
	/*
	 * If we come here after a random call to set_need_resched(),
	 * or we have been woken up remotely but the IPI has not yet arrived,
	 * we haven't yet exited the RCU idle mode. Do it here manually until
	 * we find a better solution.
	 *
	 * NB: There are buggy callers of this function.  Ideally we
	 * should warn if prev_state != CONTEXT_USER, but that will trigger
	 * too frequently to make sense yet.
	 */
3269 enum ctx_state prev_state = exception_enter();
3270 schedule();
3271 exception_exit(prev_state);
3272}
3273#endif
3274
/**
 * schedule_preempt_disabled - called with preemption disabled
 *
 * Returns with preemption disabled. Note: preempt_count must be 1
 */
3280void __sched schedule_preempt_disabled(void)
3281{
3282 sched_preempt_enable_no_resched();
3283 schedule();
3284 preempt_disable();
3285}
3286
3287static void __sched notrace preempt_schedule_common(void)
3288{
3289 do {
3290 preempt_disable_notrace();
3291 __schedule(true);
3292 preempt_enable_no_resched_notrace();
3293
		/*
		 * Check again in case we missed a preemption opportunity
		 * between schedule and now.
		 */
3298 } while (need_resched());
3299}
3300
3301#ifdef CONFIG_PREEMPT
/*
 * this is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
 */
3307asmlinkage __visible void __sched notrace preempt_schedule(void)
3308{
	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task. Just return..
	 */
3313 if (likely(!preemptible()))
3314 return;
3315
3316 preempt_schedule_common();
3317}
3318NOKPROBE_SYMBOL(preempt_schedule);
3319EXPORT_SYMBOL(preempt_schedule);
3320
/**
 * preempt_schedule_notrace - preempt_schedule called by tracing
 *
 * The tracing infrastructure uses preempt_enable_notrace to prevent
 * recursion and tracing preempt enabling caused by the tracing
 * infrastructure itself. But as tracing can happen in areas coming
 * from userspace or just about to enter userspace, a preempt enable
 * can occur before user_exit() is called. This will cause the scheduler
 * to be called when the system is still in usermode.
 *
 * To prevent this, the preempt_enable_notrace will use this function
 * instead of preempt_schedule() to exit user context if needed before
 * calling the scheduler.
 */
3335asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
3336{
3337 enum ctx_state prev_ctx;
3338
3339 if (likely(!preemptible()))
3340 return;
3341
3342 do {
3343 preempt_disable_notrace();
		/*
		 * Needs preempt disabled in case user_exit() is traced
		 * and the tracer calls preempt_enable_notrace() causing
		 * the preempted task to schedule.
		 */
3349 prev_ctx = exception_enter();
3350 __schedule(true);
3351 exception_exit(prev_ctx);
3352
3353 preempt_enable_no_resched_notrace();
3354 } while (need_resched());
3355}
3356EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
3357
3358#endif
3359
/*
 * this is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note, that this is called and return with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
3366asmlinkage __visible void __sched preempt_schedule_irq(void)
3367{
3368 enum ctx_state prev_state;
3369
	/* Catch callers which need to be fixed */
3371 BUG_ON(preempt_count() || !irqs_disabled());
3372
3373 prev_state = exception_enter();
3374
3375 do {
3376 preempt_disable();
3377 local_irq_enable();
3378 __schedule(true);
3379 local_irq_disable();
3380 sched_preempt_enable_no_resched();
3381 } while (need_resched());
3382
3383 exception_exit(prev_state);
3384}
3385
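/* Default wake function for wait queues: just wake the waiting task. */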
3386int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
3387 void *key)
3388{
3389 return try_to_wake_up(curr->private, mode, wake_flags);
3390}
3391EXPORT_SYMBOL(default_wake_function);
3392
3393#ifdef CONFIG_RT_MUTEXES
3394
/*
 * rt_mutex_setprio - set the current priority of a task
 * @p: task
 * @prio: prio value (kernel-internal form)
 *
 * This function changes the 'effective' priority of a task. It does
 * not touch ->normal_prio like __setscheduler().
 *
 * Used by the rt_mutex code to implement priority inheritance
 * logic. Call site only calls if the priority of the task changed.
 */
3406void rt_mutex_setprio(struct task_struct *p, int prio)
3407{
3408 int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
3409 struct rq *rq;
3410 const struct sched_class *prev_class;
3411
3412 BUG_ON(prio > MAX_PRIO);
3413
3414 rq = __task_rq_lock(p);
3415
	/*
	 * Idle task boosting is a nono in general. There is one
	 * exception, when PREEMPT_RT and NOHZ is active:
	 *
	 * The idle task calls get_next_timer_interrupt() and holds
	 * the timer wheel base->lock on the CPU and another CPU wants
	 * to access the timer (probably to cancel it). We can safely
	 * modify the timer state _before_ the wakeup is done, but we
	 * hold the runqueue lock while the other CPU is spinning on
	 * our base->lock. So we can safely modify the timer state and
	 * can wait for the other CPU only after dropping the lock.
	 *
	 * If the idle task nevertheless shows up here, warn and bail.
	 */
3428 if (unlikely(p == rq->idle)) {
3429 WARN_ON(p != rq->curr);
3430 WARN_ON(p->pi_blocked_on);
3431 goto out_unlock;
3432 }
3433
3434 trace_sched_pi_setprio(p, prio);
3435 oldprio = p->prio;
3436
3437 if (oldprio == prio)
3438 queue_flag &= ~DEQUEUE_MOVE;
3439
3440 prev_class = p->sched_class;
3441 queued = task_on_rq_queued(p);
3442 running = task_current(rq, p);
3443 if (queued)
3444 dequeue_task(rq, p, queue_flag);
3445 if (running)
3446 put_prev_task(rq, p);
3447
	/*
	 * Boosting condition are:
	 * 1. -rt task is running and holds mutex A
	 *      --> -dl task blocks on mutex A
	 *
	 * 2. -dl task is running and holds mutex A
	 *      --> -dl task blocks on mutex A and could preempt the
	 *          running task
	 */
3457 if (dl_prio(prio)) {
3458 struct task_struct *pi_task = rt_mutex_get_top_task(p);
3459 if (!dl_prio(p->normal_prio) ||
3460 (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
3461 p->dl.dl_boosted = 1;
3462 queue_flag |= ENQUEUE_REPLENISH;
3463 } else
3464 p->dl.dl_boosted = 0;
3465 p->sched_class = &dl_sched_class;
3466 } else if (rt_prio(prio)) {
3467 if (dl_prio(oldprio))
3468 p->dl.dl_boosted = 0;
3469 if (oldprio < prio)
3470 queue_flag |= ENQUEUE_HEAD;
3471 p->sched_class = &rt_sched_class;
3472 } else {
3473 if (dl_prio(oldprio))
3474 p->dl.dl_boosted = 0;
3475 if (rt_prio(oldprio))
3476 p->rt.timeout = 0;
3477 p->sched_class = &fair_sched_class;
3478 }
3479
3480 p->prio = prio;
3481
3482 if (running)
3483 p->sched_class->set_curr_task(rq);
3484 if (queued)
3485 enqueue_task(rq, p, queue_flag);
3486
3487 check_class_changed(rq, p, prev_class, oldprio);
3488out_unlock:
3489 preempt_disable();
3490 __task_rq_unlock(rq);
3491
3492 balance_callback(rq);
3493 preempt_enable();
3494}
3495#endif
3496
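/*
 * set_user_nice() changes a task's nice value (and thus its static
 * priority and load weight), requeueing and rescheduling it as needed.
 */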
3497void set_user_nice(struct task_struct *p, long nice)
3498{
3499 int old_prio, delta, queued;
3500 unsigned long flags;
3501 struct rq *rq;
3502
3503 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
3504 return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
3509 rq = task_rq_lock(p, &flags);
	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it wont have any effect on scheduling until the task is
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
	 */
3516 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
3517 p->static_prio = NICE_TO_PRIO(nice);
3518 goto out_unlock;
3519 }
3520 queued = task_on_rq_queued(p);
3521 if (queued)
3522 dequeue_task(rq, p, DEQUEUE_SAVE);
3523
3524 p->static_prio = NICE_TO_PRIO(nice);
3525 set_load_weight(p);
3526 old_prio = p->prio;
3527 p->prio = effective_prio(p);
3528 delta = p->prio - old_prio;
3529
3530 if (queued) {
3531 enqueue_task(rq, p, ENQUEUE_RESTORE);
		/*
		 * If the task increased its priority or is running and
		 * lowered its priority, then reschedule its CPU:
		 */
3536 if (delta < 0 || (delta > 0 && task_running(rq, p)))
3537 resched_curr(rq);
3538 }
3539out_unlock:
3540 task_rq_unlock(rq, p, &flags);
3541}
3542EXPORT_SYMBOL(set_user_nice);
3543
/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
3549int can_nice(const struct task_struct *p, const int nice)
3550{
	/* convert nice value [19,-20] to rlimit style value [1,40] */
3552 int nice_rlim = nice_to_rlimit(nice);
3553
3554 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
3555 capable(CAP_SYS_NICE));
3556}
3557
3558#ifdef __ARCH_WANT_SYS_NICE
3559
/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
3567SYSCALL_DEFINE1(nice, int, increment)
3568{
3569 long nice, retval;
3570
	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
3576 increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
3577 nice = task_nice(current) + increment;
3578
3579 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
3580 if (increment < 0 && !can_nice(current, nice))
3581 return -EPERM;
3582
3583 retval = security_task_setnice(current, nice);
3584 if (retval)
3585 return retval;
3586
3587 set_user_nice(current, nice);
3588 return 0;
3589}
3590
3591#endif
3592
/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
3601int task_prio(const struct task_struct *p)
3602{
3603 return p->prio - MAX_RT_PRIO;
3604}
3605
/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
3612int idle_cpu(int cpu)
3613{
3614 struct rq *rq = cpu_rq(cpu);
3615
3616 if (rq->curr != rq->idle)
3617 return 0;
3618
3619 if (rq->nr_running)
3620 return 0;
3621
3622#ifdef CONFIG_SMP
3623 if (!llist_empty(&rq->wake_list))
3624 return 0;
3625#endif
3626
3627 return 1;
3628}
3629
/**
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the cpu @cpu.
 */
3636struct task_struct *idle_task(int cpu)
3637{
3638 return cpu_rq(cpu)->idle;
3639}
3640
/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
3647static struct task_struct *find_process_by_pid(pid_t pid)
3648{
3649 return pid ? find_task_by_vpid(pid) : current;
3650}
3651
/*
 * This function initializes the sched_dl_entity of a newly becoming
 * SCHED_DEADLINE task.
 *
 * Only the static values are considered here, the actual runtime and the
 * absolute deadline will be properly calculated when the task is enqueued
 * for the first time with its new policy.
 */
3660static void
3661__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3662{
3663 struct sched_dl_entity *dl_se = &p->dl;
3664
3665 dl_se->dl_runtime = attr->sched_runtime;
3666 dl_se->dl_deadline = attr->sched_deadline;
3667 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3668 dl_se->flags = attr->sched_flags;
3669 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3670
	/*
	 * Changing the parameters of a task is 'tricky' and we're not doing
	 * the correct thing -- also see task_dead_dl() and switched_from_dl().
	 *
	 * What we SHOULD do is delay the bandwidth release until the 0-lag
	 * point. This would include retaining the task_struct until that time
	 * and change dl_overflow() to not immediately decrement the current
	 * amount.
	 *
	 * Instead we retain the current runtime/deadline and let the new
	 * parameters take effect after the current reservation period lapses.
	 * This is safe (albeit racy) because the user has allocated the
	 * bandwidth.
	 */
3690}
3691
/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
3696#define SETPARAM_POLICY -1
3697
3698static void __setscheduler_params(struct task_struct *p,
3699 const struct sched_attr *attr)
3700{
3701 int policy = attr->sched_policy;
3702
3703 if (policy == SETPARAM_POLICY)
3704 policy = p->policy;
3705
3706 p->policy = policy;
3707
3708 if (dl_policy(policy))
3709 __setparam_dl(p, attr);
3710 else if (fair_policy(policy))
3711 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3712
	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
3718 p->rt_priority = attr->sched_priority;
3719 p->normal_prio = normal_prio(p);
3720 set_load_weight(p);
3721}
3722
/* Actually do priority change: must hold pi & rq lock. */
3724static void __setscheduler(struct rq *rq, struct task_struct *p,
3725 const struct sched_attr *attr, bool keep_boost)
3726{
3727 __setscheduler_params(p, attr);
3728
	/*
	 * Keep a potential priority boosting if called from
	 * sched_setscheduler().
	 */
3733 if (keep_boost)
3734 p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3735 else
3736 p->prio = normal_prio(p);
3737
3738 if (dl_prio(p->prio))
3739 p->sched_class = &dl_sched_class;
3740 else if (rt_prio(p->prio))
3741 p->sched_class = &rt_sched_class;
3742 else
3743 p->sched_class = &fair_sched_class;
3744}
3745
3746static void
3747__getparam_dl(struct task_struct *p, struct sched_attr *attr)
3748{
3749 struct sched_dl_entity *dl_se = &p->dl;
3750
3751 attr->sched_priority = p->rt_priority;
3752 attr->sched_runtime = dl_se->dl_runtime;
3753 attr->sched_deadline = dl_se->dl_deadline;
3754 attr->sched_period = dl_se->dl_period;
3755 attr->sched_flags = dl_se->flags;
3756}
3757
/*
 * This function validates the new parameters of a -deadline task.
 * We ask for the deadline not being zero, and greater or equal
 * than the runtime, as well as the period of being zero or
 * greater than deadline. Furthermore, we have to be sure that
 * user parameters are above the internal resolution of 1us (we
 * check sched_runtime only since it is always the smaller one) and
 * below 2^63 ns (we have to check both sched_deadline and
 * sched_period, as the latter can be zero).
 */
3768static bool
3769__checkparam_dl(const struct sched_attr *attr)
3770{
	/* deadline != 0 */
3772 if (attr->sched_deadline == 0)
3773 return false;
3774
	/*
	 * Since we truncate DL_SCALE bits, make sure we're at least
	 * that big.
	 */
3779 if (attr->sched_runtime < (1ULL << DL_SCALE))
3780 return false;
3781
	/*
	 * Since we use the MSB for wrap-around and sign issues, make
	 * sure it's not set (mind that period can be equal to zero).
	 */
3786 if (attr->sched_deadline & (1ULL << 63) ||
3787 attr->sched_period & (1ULL << 63))
3788 return false;
3789
	/* runtime <= deadline <= period (if period != 0) */
3791 if ((attr->sched_period != 0 &&
3792 attr->sched_period < attr->sched_deadline) ||
3793 attr->sched_deadline < attr->sched_runtime)
3794 return false;
3795
3796 return true;
3797}
3798
/*
 * check the target process has a UID that matches the current process's
 */
3802static bool check_same_owner(struct task_struct *p)
3803{
3804 const struct cred *cred = current_cred(), *pcred;
3805 bool match;
3806
3807 rcu_read_lock();
3808 pcred = __task_cred(p);
3809 match = (uid_eq(cred->euid, pcred->euid) ||
3810 uid_eq(cred->euid, pcred->uid));
3811 rcu_read_unlock();
3812 return match;
3813}
3814
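/* Do the user-supplied attributes change any of p's -deadline parameters? */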
3815static bool dl_param_changed(struct task_struct *p,
3816 const struct sched_attr *attr)
3817{
3818 struct sched_dl_entity *dl_se = &p->dl;
3819
3820 if (dl_se->dl_runtime != attr->sched_runtime ||
3821 dl_se->dl_deadline != attr->sched_deadline ||
3822 dl_se->dl_period != attr->sched_period ||
3823 dl_se->flags != attr->sched_flags)
3824 return true;
3825
3826 return false;
3827}
3828
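/*
 * Actual policy/priority change. @user denotes a user-space request
 * (permission and bandwidth checks apply), @pi whether priority
 * inheritance boosting must be taken into account.
 */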
3829static int __sched_setscheduler(struct task_struct *p,
3830 const struct sched_attr *attr,
3831 bool user, bool pi)
3832{
3833 int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
3834 MAX_RT_PRIO - 1 - attr->sched_priority;
3835 int retval, oldprio, oldpolicy = -1, queued, running;
3836 int new_effective_prio, policy = attr->sched_policy;
3837 unsigned long flags;
3838 const struct sched_class *prev_class;
3839 struct rq *rq;
3840 int reset_on_fork;
3841 int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
3842
	/* may grab non-irq protected spin_locks */
3844 BUG_ON(in_interrupt());
3845recheck:
	/* double check policy once rq lock held */
3847 if (policy < 0) {
3848 reset_on_fork = p->sched_reset_on_fork;
3849 policy = oldpolicy = p->policy;
3850 } else {
3851 reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
3852
3853 if (!valid_policy(policy))
3854 return -EINVAL;
3855 }
3856
3857 if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
3858 return -EINVAL;
3859
	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
3865 if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
3866 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
3867 return -EINVAL;
3868 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
3869 (rt_policy(policy) != (attr->sched_priority != 0)))
3870 return -EINVAL;
3871
	/*
	 * Allow unprivileged RT tasks to decrease priority:
	 */
3875 if (user && !capable(CAP_SYS_NICE)) {
3876 if (fair_policy(policy)) {
3877 if (attr->sched_nice < task_nice(p) &&
3878 !can_nice(p, attr->sched_nice))
3879 return -EPERM;
3880 }
3881
3882 if (rt_policy(policy)) {
3883 unsigned long rlim_rtprio =
3884 task_rlimit(p, RLIMIT_RTPRIO);
3885
			/* can't set/change the rt policy */
3887 if (policy != p->policy && !rlim_rtprio)
3888 return -EPERM;
3889
			/* can't increase priority */
3891 if (attr->sched_priority > p->rt_priority &&
3892 attr->sched_priority > rlim_rtprio)
3893 return -EPERM;
3894 }
3895
		 /*
		  * Can't set/change SCHED_DEADLINE policy at all for now
		  * (safest behavior); in the future we would like to allow
		  * unprivileged DL tasks to increase their relative deadline
		  * or reduce their runtime (both ways reducing utilization)
		  */
3902 if (dl_policy(policy))
3903 return -EPERM;
3904
		/*
		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
		 */
3909 if (idle_policy(p->policy) && !idle_policy(policy)) {
3910 if (!can_nice(p, task_nice(p)))
3911 return -EPERM;
3912 }
3913
		/* can't change other user's priorities */
3915 if (!check_same_owner(p))
3916 return -EPERM;
3917
		/* Normal users shall not reset the sched_reset_on_fork flag */
3919 if (p->sched_reset_on_fork && !reset_on_fork)
3920 return -EPERM;
3921 }
3922
3923 if (user) {
3924 retval = security_task_setscheduler(p);
3925 if (retval)
3926 return retval;
3927 }
3928
	/*
	 * make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
3936 rq = task_rq_lock(p, &flags);
3937
	/*
	 * Changing the policy of the stop threads its a very bad idea
	 */
3941 if (p == rq->stop) {
3942 task_rq_unlock(rq, p, &flags);
3943 return -EINVAL;
3944 }
3945
	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
3950 if (unlikely(policy == p->policy)) {
3951 if (fair_policy(policy) && attr->sched_nice != task_nice(p))
3952 goto change;
3953 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
3954 goto change;
3955 if (dl_policy(policy) && dl_param_changed(p, attr))
3956 goto change;
3957
3958 p->sched_reset_on_fork = reset_on_fork;
3959 task_rq_unlock(rq, p, &flags);
3960 return 0;
3961 }
3962change:
3963
3964 if (user) {
3965#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow realtime tasks into groups that have no runtime
		 * assigned.
		 */
3970 if (rt_bandwidth_enabled() && rt_policy(policy) &&
3971 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
3972 !task_group_is_autogroup(task_group(p))) {
3973 task_rq_unlock(rq, p, &flags);
3974 return -EPERM;
3975 }
3976#endif
3977#ifdef CONFIG_SMP
3978 if (dl_bandwidth_enabled() && dl_policy(policy)) {
3979 cpumask_t *span = rq->rd->span;
3980
			/*
			 * Don't allow tasks with an affinity mask smaller
			 * than the entire root_domain to become SCHED_DEADLINE.
			 * We will also fail if there's no bandwidth available.
			 */
3986 if (!cpumask_subset(span, &p->cpus_allowed) ||
3987 rq->rd->dl_bw.bw == 0) {
3988 task_rq_unlock(rq, p, &flags);
3989 return -EPERM;
3990 }
3991 }
3992#endif
3993 }
3994
	/* recheck policy now with rq lock held */
3996 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3997 policy = oldpolicy = -1;
3998 task_rq_unlock(rq, p, &flags);
3999 goto recheck;
4000 }
4001
	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
4007 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
4008 task_rq_unlock(rq, p, &flags);
4009 return -EBUSY;
4010 }
4011
4012 p->sched_reset_on_fork = reset_on_fork;
4013 oldprio = p->prio;
4014
4015 if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboost
		 * itself.
		 */
4023 new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
4024 if (new_effective_prio == oldprio)
4025 queue_flags &= ~DEQUEUE_MOVE;
4026 }
4027
4028 queued = task_on_rq_queued(p);
4029 running = task_current(rq, p);
4030 if (queued)
4031 dequeue_task(rq, p, queue_flags);
4032 if (running)
4033 put_prev_task(rq, p);
4034
4035 prev_class = p->sched_class;
4036 __setscheduler(rq, p, attr, pi);
4037
4038 if (running)
4039 p->sched_class->set_curr_task(rq);
4040 if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
4045 if (oldprio < p->prio)
4046 queue_flags |= ENQUEUE_HEAD;
4047
4048 enqueue_task(rq, p, queue_flags);
4049 }
4050
4051 check_class_changed(rq, p, prev_class, oldprio);
	preempt_disable(); /* avoid rq from going away on us */
4053 task_rq_unlock(rq, p, &flags);
4054
4055 if (pi)
4056 rt_mutex_adjust_pi(p);
4057
	/*
	 * Run balance callbacks after we've adjusted the PI chain.
	 */
4061 balance_callback(rq);
4062 preempt_enable();
4063
4064 return 0;
4065}
4066
4067static int _sched_setscheduler(struct task_struct *p, int policy,
4068 const struct sched_param *param, bool check)
4069{
4070 struct sched_attr attr = {
4071 .sched_policy = policy,
4072 .sched_priority = param->sched_priority,
4073 .sched_nice = PRIO_TO_NICE(p->static_prio),
4074 };
4075
	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
4077 if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
4078 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4079 policy &= ~SCHED_RESET_ON_FORK;
4080 attr.sched_policy = policy;
4081 }
4082
4083 return __sched_setscheduler(p, &attr, check, true);
4084}
4085
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may be already dead.
 */
4095int sched_setscheduler(struct task_struct *p, int policy,
4096 const struct sched_param *param)
4097{
4098 return _sched_setscheduler(p, policy, param, true);
4099}
4100EXPORT_SYMBOL_GPL(sched_setscheduler);
4101
4102int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
4103{
4104 return __sched_setscheduler(p, attr, true, true);
4105}
4106EXPORT_SYMBOL_GPL(sched_setattr);
4107
/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission.  For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
4121int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4122 const struct sched_param *param)
4123{
4124 return _sched_setscheduler(p, policy, param, false);
4125}
4126EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
4127
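/* Common helper for the sched_setscheduler()/sched_setparam() syscalls. */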
4128static int
4129do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4130{
4131 struct sched_param lparam;
4132 struct task_struct *p;
4133 int retval;
4134
4135 if (!param || pid < 0)
4136 return -EINVAL;
4137 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4138 return -EFAULT;
4139
4140 rcu_read_lock();
4141 retval = -ESRCH;
4142 p = find_process_by_pid(pid);
4143 if (p != NULL)
4144 retval = sched_setscheduler(p, policy, &lparam);
4145 rcu_read_unlock();
4146
4147 return retval;
4148}
4149
/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
4153static int sched_copy_attr(struct sched_attr __user *uattr,
4154 struct sched_attr *attr)
4155{
4156 u32 size;
4157 int ret;
4158
4159 if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
4160 return -EFAULT;
4161
	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
4165 memset(attr, 0, sizeof(*attr));
4166
4167 ret = get_user(size, &uattr->size);
4168 if (ret)
4169 return ret;
4170
4171 if (size > PAGE_SIZE)
4172 goto err_size;
4173
4174 if (!size)
4175 size = SCHED_ATTR_SIZE_VER0;
4176
4177 if (size < SCHED_ATTR_SIZE_VER0)
4178 goto err_size;
4179
	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we dont know about yet.
	 */
4186 if (size > sizeof(*attr)) {
4187 unsigned char __user *addr;
4188 unsigned char __user *end;
4189 unsigned char val;
4190
4191 addr = (void __user *)uattr + sizeof(*attr);
4192 end = (void __user *)uattr + size;
4193
4194 for (; addr < end; addr++) {
4195 ret = get_user(val, addr);
4196 if (ret)
4197 return ret;
4198 if (val)
4199 goto err_size;
4200 }
4201 size = sizeof(*attr);
4202 }
4203
4204 ret = copy_from_user(attr, uattr, size);
4205 if (ret)
4206 return -EFAULT;
4207
	/*
	 * XXX: do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
4212 attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
4213
4214 return 0;
4215
4216err_size:
4217 put_user(sizeof(*attr), &uattr->size);
4218 return -E2BIG;
4219}
4220
/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
4229SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4230 struct sched_param __user *, param)
4231{
	/* negative values for policy are not valid */
4233 if (policy < 0)
4234 return -EINVAL;
4235
4236 return do_sched_setscheduler(pid, policy, param);
4237}
4238
/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
4246SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4247{
4248 return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
4249}
4250
/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
4257SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
4258 unsigned int, flags)
4259{
4260 struct sched_attr attr;
4261 struct task_struct *p;
4262 int retval;
4263
4264 if (!uattr || pid < 0 || flags)
4265 return -EINVAL;
4266
4267 retval = sched_copy_attr(uattr, &attr);
4268 if (retval)
4269 return retval;
4270
4271 if ((int)attr.sched_policy < 0)
4272 return -EINVAL;
4273
4274 rcu_read_lock();
4275 retval = -ESRCH;
4276 p = find_process_by_pid(pid);
4277 if (p != NULL)
4278 retval = sched_setattr(p, &attr);
4279 rcu_read_unlock();
4280
4281 return retval;
4282}
4283
/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
4291SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
4292{
4293 struct task_struct *p;
4294 int retval;
4295
4296 if (pid < 0)
4297 return -EINVAL;
4298
4299 retval = -ESRCH;
4300 rcu_read_lock();
4301 p = find_process_by_pid(pid);
4302 if (p) {
4303 retval = security_task_getscheduler(p);
4304 if (!retval)
4305 retval = p->policy
4306 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
4307 }
4308 rcu_read_unlock();
4309 return retval;
4310}
4311
/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
4320SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
4321{
4322 struct sched_param lp = { .sched_priority = 0 };
4323 struct task_struct *p;
4324 int retval;
4325
4326 if (!param || pid < 0)
4327 return -EINVAL;
4328
4329 rcu_read_lock();
4330 p = find_process_by_pid(pid);
4331 retval = -ESRCH;
4332 if (!p)
4333 goto out_unlock;
4334
4335 retval = security_task_getscheduler(p);
4336 if (retval)
4337 goto out_unlock;
4338
4339 if (task_has_rt_policy(p))
4340 lp.sched_priority = p->rt_priority;
4341 rcu_read_unlock();
4342
	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
4346 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4347
4348 return retval;
4349
4350out_unlock:
4351 rcu_read_unlock();
4352 return retval;
4353}
4354
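/*
 * Copy a kernel sched_attr to user space; when the user buffer is
 * smaller than the kernel structure, shrink attr->size to @usize,
 * provided the kernel fields that would be dropped are all zero.
 */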
4355static int sched_read_attr(struct sched_attr __user *uattr,
4356 struct sched_attr *attr,
4357 unsigned int usize)
4358{
4359 int ret;
4360
4361 if (!access_ok(VERIFY_WRITE, uattr, usize))
4362 return -EFAULT;
4363
	/*
	 * If we're handed a smaller struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. old
	 * user-space does not get uncomplete information.
	 */
4369 if (usize < sizeof(*attr)) {
4370 unsigned char *addr;
4371 unsigned char *end;
4372
4373 addr = (void *)attr + usize;
4374 end = (void *)attr + sizeof(*attr);
4375
4376 for (; addr < end; addr++) {
4377 if (*addr)
4378 return -EFBIG;
4379 }
4380
4381 attr->size = usize;
4382 }
4383
4384 ret = copy_to_user(uattr, attr, attr->size);
4385 if (ret)
4386 return -EFAULT;
4387
4388 return 0;
4389}
4390
/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @size: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 */
4398SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
4399 unsigned int, size, unsigned int, flags)
4400{
4401 struct sched_attr attr = {
4402 .size = sizeof(struct sched_attr),
4403 };
4404 struct task_struct *p;
4405 int retval;
4406
4407 if (!uattr || pid < 0 || size > PAGE_SIZE ||
4408 size < SCHED_ATTR_SIZE_VER0 || flags)
4409 return -EINVAL;
4410
4411 rcu_read_lock();
4412 p = find_process_by_pid(pid);
4413 retval = -ESRCH;
4414 if (!p)
4415 goto out_unlock;
4416
4417 retval = security_task_getscheduler(p);
4418 if (retval)
4419 goto out_unlock;
4420
4421 attr.sched_policy = p->policy;
4422 if (p->sched_reset_on_fork)
4423 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
4424 if (task_has_dl_policy(p))
4425 __getparam_dl(p, &attr);
4426 else if (task_has_rt_policy(p))
4427 attr.sched_priority = p->rt_priority;
4428 else
4429 attr.sched_nice = task_nice(p);
4430
4431 rcu_read_unlock();
4432
4433 retval = sched_read_attr(uattr, &attr, size);
4434 return retval;
4435
4436out_unlock:
4437 rcu_read_unlock();
4438 return retval;
4439}
4440
4441long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4442{
4443 cpumask_var_t cpus_allowed, new_mask;
4444 struct task_struct *p;
4445 int retval;
4446
4447 rcu_read_lock();
4448
4449 p = find_process_by_pid(pid);
4450 if (!p) {
4451 rcu_read_unlock();
4452 return -ESRCH;
4453 }
4454
	/* Prevent p going away */
4456 get_task_struct(p);
4457 rcu_read_unlock();
4458
4459 if (p->flags & PF_NO_SETAFFINITY) {
4460 retval = -EINVAL;
4461 goto out_put_task;
4462 }
4463 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4464 retval = -ENOMEM;
4465 goto out_put_task;
4466 }
4467 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4468 retval = -ENOMEM;
4469 goto out_free_cpus_allowed;
4470 }
4471 retval = -EPERM;
4472 if (!check_same_owner(p)) {
4473 rcu_read_lock();
4474 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
4475 rcu_read_unlock();
4476 goto out_free_new_mask;
4477 }
4478 rcu_read_unlock();
4479 }
4480
4481 retval = security_task_setscheduler(p);
4482 if (retval)
4483 goto out_free_new_mask;
4484
4485
4486 cpuset_cpus_allowed(p, cpus_allowed);
4487 cpumask_and(new_mask, in_mask, cpus_allowed);
4488
	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
4495#ifdef CONFIG_SMP
4496 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
4497 rcu_read_lock();
4498 if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
4499 retval = -EBUSY;
4500 rcu_read_unlock();
4501 goto out_free_new_mask;
4502 }
4503 rcu_read_unlock();
4504 }
4505#endif
4506again:
4507 retval = __set_cpus_allowed_ptr(p, new_mask, true);
4508
4509 if (!retval) {
4510 cpuset_cpus_allowed(p, cpus_allowed);
4511 if (!cpumask_subset(new_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
4517 cpumask_copy(new_mask, cpus_allowed);
4518 goto again;
4519 }
4520 }
4521out_free_new_mask:
4522 free_cpumask_var(new_mask);
4523out_free_cpus_allowed:
4524 free_cpumask_var(cpus_allowed);
4525out_put_task:
4526 put_task_struct(p);
4527 return retval;
4528}
4529
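/*
 * Copy a user-space cpu mask into @new_mask: shorter masks are
 * zero-extended, longer ones truncated to cpumask_size().
 */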
4530static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4531 struct cpumask *new_mask)
4532{
4533 if (len < cpumask_size())
4534 cpumask_clear(new_mask);
4535 else if (len > cpumask_size())
4536 len = cpumask_size();
4537
4538 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4539}
4540
/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 *
 * Return: 0 on success. An error code otherwise.
 */
4549SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4550 unsigned long __user *, user_mask_ptr)
4551{
4552 cpumask_var_t new_mask;
4553 int retval;
4554
4555 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4556 return -ENOMEM;
4557
4558 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4559 if (retval == 0)
4560 retval = sched_setaffinity(pid, new_mask);
4561 free_cpumask_var(new_mask);
4562 return retval;
4563}
4564
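/*
 * Return the set of CPUs the task is allowed to run on, restricted to
 * the CPUs that are currently active.
 */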
4565long sched_getaffinity(pid_t pid, struct cpumask *mask)
4566{
4567 struct task_struct *p;
4568 unsigned long flags;
4569 int retval;
4570
4571 rcu_read_lock();
4572
4573 retval = -ESRCH;
4574 p = find_process_by_pid(pid);
4575 if (!p)
4576 goto out_unlock;
4577
4578 retval = security_task_getscheduler(p);
4579 if (retval)
4580 goto out_unlock;
4581
4582 raw_spin_lock_irqsave(&p->pi_lock, flags);
4583 cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
4584 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4585
4586out_unlock:
4587 rcu_read_unlock();
4588
4589 return retval;
4590}
4591
/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
4600SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4601 unsigned long __user *, user_mask_ptr)
4602{
4603 int ret;
4604 cpumask_var_t mask;
4605
4606 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4607 return -EINVAL;
4608 if (len & (sizeof(unsigned long)-1))
4609 return -EINVAL;
4610
4611 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4612 return -ENOMEM;
4613
4614 ret = sched_getaffinity(pid, mask);
4615 if (ret == 0) {
4616 size_t retlen = min_t(size_t, len, cpumask_size());
4617
4618 if (copy_to_user(user_mask_ptr, mask, retlen))
4619 ret = -EFAULT;
4620 else
4621 ret = retlen;
4622 }
4623 free_cpumask_var(mask);
4624
4625 return ret;
4626}
4627
/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
4636SYSCALL_DEFINE0(sched_yield)
4637{
4638 struct rq *rq = this_rq_lock();
4639
4640 schedstat_inc(rq, yld_count);
4641 current->sched_class->yield_task(rq);
4642
	/*
	 * Since we are going to call schedule() anyway, there's
	 * no need to preempt or enable interrupts:
	 */
4647 __release(rq->lock);
4648 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4649 do_raw_spin_unlock(&rq->lock);
4650 sched_preempt_enable_no_resched();
4651
4652 schedule();
4653
4654 return 0;
4655}
4656
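/* Reschedule if needed; returns 1 when a reschedule actually happened. */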
4657int __sched _cond_resched(void)
4658{
4659 if (should_resched(0)) {
4660 preempt_schedule_common();
4661 return 1;
4662 }
4663 return 0;
4664}
4665EXPORT_SYMBOL(_cond_resched);
4666
/*
 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
 * call schedule, and on return reacquire the lock.
 *
 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
 * operations here to prevent schedule() from being called twice (once via
 * spin_unlock(), once by hand).
 */
4675int __cond_resched_lock(spinlock_t *lock)
4676{
4677 int resched = should_resched(PREEMPT_LOCK_OFFSET);
4678 int ret = 0;
4679
4680 lockdep_assert_held(lock);
4681
4682 if (spin_needbreak(lock) || resched) {
4683 spin_unlock(lock);
4684 if (resched)
4685 preempt_schedule_common();
4686 else
4687 cpu_relax();
4688 ret = 1;
4689 spin_lock(lock);
4690 }
4691 return ret;
4692}
4693EXPORT_SYMBOL(__cond_resched_lock);
4694
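/*
 * Like _cond_resched(), but callable from softirq-disabled sections:
 * BHs are re-enabled around the reschedule.
 */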
4695int __sched __cond_resched_softirq(void)
4696{
4697 BUG_ON(!in_softirq());
4698
4699 if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
4700 local_bh_enable();
4701 preempt_schedule_common();
4702 local_bh_disable();
4703 return 1;
4704 }
4705 return 0;
4706}
4707EXPORT_SYMBOL(__cond_resched_softirq);
4708
/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, its already broken.
 *
 * Typical broken usage is:
 *
 * wait_for_completion(&event);
 * yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you want to use yield() to exit your function, use return :-)
 */
4731void __sched yield(void)
4732{
4733 set_current_state(TASK_RUNNING);
4734 sys_sched_yield();
4735}
4736EXPORT_SYMBOL(yield);
4737
/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
4753int __sched yield_to(struct task_struct *p, bool preempt)
4754{
4755 struct task_struct *curr = current;
4756 struct rq *rq, *p_rq;
4757 unsigned long flags;
4758 int yielded = 0;
4759
4760 local_irq_save(flags);
4761 rq = this_rq();
4762
4763again:
4764 p_rq = task_rq(p);
4765
	/*
	 * If we're the only runnable task on the rq and target rq also
	 * has only one task, there's absolutely no point in yielding.
	 */
4769 if (rq->nr_running == 1 && p_rq->nr_running == 1) {
4770 yielded = -ESRCH;
4771 goto out_irq;
4772 }
4773
4774 double_rq_lock(rq, p_rq);
4775 if (task_rq(p) != p_rq) {
4776 double_rq_unlock(rq, p_rq);
4777 goto again;
4778 }
4779
4780 if (!curr->sched_class->yield_to_task)
4781 goto out_unlock;
4782
4783 if (curr->sched_class != p->sched_class)
4784 goto out_unlock;
4785
4786 if (task_running(p_rq, p) || p->state)
4787 goto out_unlock;
4788
4789 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
4790 if (yielded) {
4791 schedstat_inc(rq, yld_count);
		/*
		 * Make p's CPU reschedule; pick_next_entity takes care of
		 * fairness.
		 */
4796 if (preempt && rq != p_rq)
4797 resched_curr(p_rq);
4798 }
4799
4800out_unlock:
4801 double_rq_unlock(rq, p_rq);
4802out_irq:
4803 local_irq_restore(flags);
4804
4805 if (yielded > 0)
4806 schedule();
4807
4808 return yielded;
4809}
4810EXPORT_SYMBOL_GPL(yield_to);
4811
/*
 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 */
4816long __sched io_schedule_timeout(long timeout)
4817{
4818 int old_iowait = current->in_iowait;
4819 struct rq *rq;
4820 long ret;
4821
4822 current->in_iowait = 1;
4823 blk_schedule_flush_plug(current);
4824
4825 delayacct_blkio_start();
4826 rq = raw_rq();
4827 atomic_inc(&rq->nr_iowait);
4828 ret = schedule_timeout(timeout);
4829 current->in_iowait = old_iowait;
4830 atomic_dec(&rq->nr_iowait);
4831 delayacct_blkio_end();
4832
4833 return ret;
4834}
4835EXPORT_SYMBOL(io_schedule_timeout);
4836
/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
4845SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
4846{
4847 int ret = -EINVAL;
4848
4849 switch (policy) {
4850 case SCHED_FIFO:
4851 case SCHED_RR:
4852 ret = MAX_USER_RT_PRIO-1;
4853 break;
4854 case SCHED_DEADLINE:
4855 case SCHED_NORMAL:
4856 case SCHED_BATCH:
4857 case SCHED_IDLE:
4858 ret = 0;
4859 break;
4860 }
4861 return ret;
4862}
4863
/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
4872SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4873{
4874 int ret = -EINVAL;
4875
4876 switch (policy) {
4877 case SCHED_FIFO:
4878 case SCHED_RR:
4879 ret = 1;
4880 break;
4881 case SCHED_DEADLINE:
4882 case SCHED_NORMAL:
4883 case SCHED_BATCH:
4884 case SCHED_IDLE:
4885 ret = 0;
4886 }
4887 return ret;
4888}
4889
/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
 * an error code.
 */
4901SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4902 struct timespec __user *, interval)
4903{
4904 struct task_struct *p;
4905 unsigned int time_slice;
4906 unsigned long flags;
4907 struct rq *rq;
4908 int retval;
4909 struct timespec t;
4910
4911 if (pid < 0)
4912 return -EINVAL;
4913
4914 retval = -ESRCH;
4915 rcu_read_lock();
4916 p = find_process_by_pid(pid);
4917 if (!p)
4918 goto out_unlock;
4919
4920 retval = security_task_getscheduler(p);
4921 if (retval)
4922 goto out_unlock;
4923
4924 rq = task_rq_lock(p, &flags);
4925 time_slice = 0;
4926 if (p->sched_class->get_rr_interval)
4927 time_slice = p->sched_class->get_rr_interval(rq, p);
4928 task_rq_unlock(rq, p, &flags);
4929
4930 rcu_read_unlock();
4931 jiffies_to_timespec(time_slice, &t);
4932 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4933 return retval;
4934
4935out_unlock:
4936 rcu_read_unlock();
4937 return retval;
4938}
4939
4940static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
4941
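/* Dump one task: state, stack usage, pid/ppid, flags and a backtrace. */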
4942void sched_show_task(struct task_struct *p)
4943{
4944 unsigned long free = 0;
4945 int ppid;
4946 unsigned long state = p->state;
4947
4948 if (state)
4949 state = __ffs(state) + 1;
4950 printk(KERN_INFO "%-15.15s %c", p->comm,
4951 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4952#if BITS_PER_LONG == 32
4953 if (state == TASK_RUNNING)
4954 printk(KERN_CONT " running ");
4955 else
4956 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
4957#else
4958 if (state == TASK_RUNNING)
4959 printk(KERN_CONT " running task ");
4960 else
4961 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
4962#endif
4963#ifdef CONFIG_DEBUG_STACK_USAGE
4964 free = stack_not_used(p);
4965#endif
4966 ppid = 0;
4967 rcu_read_lock();
4968 if (pid_alive(p))
4969 ppid = task_pid_nr(rcu_dereference(p->real_parent));
4970 rcu_read_unlock();
4971 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4972 task_pid_nr(p), ppid,
4973 (unsigned long)task_thread_info(p)->flags);
4974
4975 print_worker_info(KERN_INFO, p);
4976 show_stack(p, NULL);
4977}
4978
4979void show_state_filter(unsigned long state_filter)
4980{
4981 struct task_struct *g, *p;
4982
4983#if BITS_PER_LONG == 32
	printk(KERN_INFO
		"  task                PC stack   pid father\n");
#else
	printk(KERN_INFO
		"  task                        PC stack   pid father\n");
4989#endif
4990 rcu_read_lock();
4991 for_each_process_thread(g, p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 */
4996 touch_nmi_watchdog();
4997 if (!state_filter || (p->state & state_filter))
4998 sched_show_task(p);
4999 }
5000
5001 touch_all_softlockup_watchdogs();
5002
5003#ifdef CONFIG_SCHED_DEBUG
5004 sysrq_sched_debug_show();
5005#endif
5006 rcu_read_unlock();
5007
	/*
	 * Only show locks if all tasks are dumped:
	 */
5010 if (!state_filter)
5011 debug_show_all_locks();
5012}
5013
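/* Called during boot to switch the boot idle thread to the idle class. */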
5014void init_idle_bootup_task(struct task_struct *idle)
5015{
5016 idle->sched_class = &idle_sched_class;
5017}
5018
/**
 * init_idle - set up an idle thread for a given CPU
 * @idle: task in question
 * @cpu: cpu the idle task belongs to
 *
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
5027void init_idle(struct task_struct *idle, int cpu)
5028{
5029 struct rq *rq = cpu_rq(cpu);
5030 unsigned long flags;
5031
5032 raw_spin_lock_irqsave(&idle->pi_lock, flags);
5033 raw_spin_lock(&rq->lock);
5034
5035 __sched_fork(0, idle);
5036 idle->state = TASK_RUNNING;
5037 idle->se.exec_start = sched_clock();
5038
5039 kasan_unpoison_task_stack(idle);
5040
5041#ifdef CONFIG_SMP
	/*
	 * Its possible that init_idle() gets called multiple times on a task,
	 * in that case do_set_cpus_allowed() will not do the right thing.
	 *
	 * And since this is boot we can forgo the serialization.
	 */
5048 set_cpus_allowed_common(idle, cpumask_of(cpu));
5049#endif
5050
	/*
	 * We're having a chicken and egg problem, even though we are
	 * holding rq->lock, the cpu isn't yet set to this cpu so the
	 * lockdep check in task_group() will fail.
	 *
	 * Similar case to sched_fork(). / Alternatively we could
	 * use task_rq_lock() here and obtain the other rq->lock.
	 *
	 * Silence PROVE_RCU
	 */
5060 rcu_read_lock();
5061 __set_task_cpu(idle, cpu);
5062 rcu_read_unlock();
5063
5064 rq->curr = rq->idle = idle;
5065 idle->on_rq = TASK_ON_RQ_QUEUED;
5066#ifdef CONFIG_SMP
5067 idle->on_cpu = 1;
5068#endif
5069 raw_spin_unlock(&rq->lock);
5070 raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
5071
	/* Set the preempt count _outside_ the spinlocks! */
5073 init_idle_preempt_count(idle, cpu);
5074
	/*
	 * The idle tasks have their own, simple scheduling class:
	 */
5078 idle->sched_class = &idle_sched_class;
5079 ftrace_graph_init_idle_task(idle, cpu);
5080 vtime_init_idle(idle, cpu);
5081#ifdef CONFIG_SMP
5082 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
5083#endif
5084}
5085
5086int cpuset_cpumask_can_shrink(const struct cpumask *cur,
5087 const struct cpumask *trial)
5088{
5089 int ret = 1, trial_cpus;
5090 struct dl_bw *cur_dl_b;
5091 unsigned long flags;
5092
5093 if (!cpumask_weight(cur))
5094 return ret;
5095
5096 rcu_read_lock_sched();
5097 cur_dl_b = dl_bw_of(cpumask_any(cur));
5098 trial_cpus = cpumask_weight(trial);
5099
5100 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
5101 if (cur_dl_b->bw != -1 &&
5102 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
5103 ret = 0;
5104 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
5105 rcu_read_unlock_sched();
5106
5107 return ret;
5108}
5109
5110int task_can_attach(struct task_struct *p,
5111 const struct cpumask *cs_cpus_allowed)
5112{
5113 int ret = 0;
5114
	/*
	 * Kthreads which disallow setaffinity shouldn't be moved
	 * to a new cpuset; we don't want to change their cpu
	 * affinity and isolating such threads by their set of
	 * allowed nodes is unnecessary.  Thus, cpusets are not
	 * applicable for such threads.  This prevents checking for
	 * success of set_cpus_allowed_ptr() on all attached tasks
	 * before cpus_allowed may be changed.
	 */
5124 if (p->flags & PF_NO_SETAFFINITY) {
5125 ret = -EINVAL;
5126 goto out;
5127 }
5128
5129#ifdef CONFIG_SMP
5130 if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
5131 cs_cpus_allowed)) {
5132 unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
5133 cs_cpus_allowed);
5134 struct dl_bw *dl_b;
5135 bool overflow;
5136 int cpus;
5137 unsigned long flags;
5138
5139 rcu_read_lock_sched();
5140 dl_b = dl_bw_of(dest_cpu);
5141 raw_spin_lock_irqsave(&dl_b->lock, flags);
5142 cpus = dl_bw_cpus(dest_cpu);
5143 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
5144 if (overflow)
5145 ret = -EBUSY;
5146 else {
			/*
			 * We reserve space for this task in the destination
			 * root_domain, as we can't fail after this point.
			 * We will free resources in the source root_domain
			 * later on (see set_cpus_allowed_dl()).
			 */
5153 __dl_add(dl_b, p->dl.dl_bw);
5154 }
5155 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
5156 rcu_read_unlock_sched();
5157
5158 }
5159#endif
5160out:
5161 return ret;
5162}
5163
5164#ifdef CONFIG_SMP
5165
5166#ifdef CONFIG_NUMA_BALANCING
/* Migrate current task p to target_cpu */
5168int migrate_task_to(struct task_struct *p, int target_cpu)
5169{
5170 struct migration_arg arg = { p, target_cpu };
5171 int curr_cpu = task_cpu(p);
5172
5173 if (curr_cpu == target_cpu)
5174 return 0;
5175
5176 if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
5177 return -EINVAL;
5178
	/* TODO: This is not properly updating schedstats */
5180
5181 trace_sched_move_numa(p, curr_cpu, target_cpu);
5182 return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
5183}
5184
/*
 * Requeue a task on a given node and accurately track the number of NUMA
 * tasks on the runqueues
 */
5189void sched_setnuma(struct task_struct *p, int nid)
5190{
5191 struct rq *rq;
5192 unsigned long flags;
5193 bool queued, running;
5194
5195 rq = task_rq_lock(p, &flags);
5196 queued = task_on_rq_queued(p);
5197 running = task_current(rq, p);
5198
5199 if (queued)
5200 dequeue_task(rq, p, DEQUEUE_SAVE);
5201 if (running)
5202 put_prev_task(rq, p);
5203
5204 p->numa_preferred_nid = nid;
5205
5206 if (running)
5207 p->sched_class->set_curr_task(rq);
5208 if (queued)
5209 enqueue_task(rq, p, ENQUEUE_RESTORE);
5210 task_rq_unlock(rq, p, &flags);
5211}
5212#endif
5213
5214#ifdef CONFIG_HOTPLUG_CPU
/*
 * Ensures that the idle task is using init_mm right before its cpu goes
 * offline.
 */
5219void idle_task_exit(void)
5220{
5221 struct mm_struct *mm = current->active_mm;
5222
5223 BUG_ON(cpu_online(smp_processor_id()));
5224
5225 if (mm != &init_mm) {
5226 switch_mm(mm, &init_mm, current);
5227 finish_arch_post_lock_switch();
5228 }
5229 mmdrop(mm);
5230}
5231
/*
 * Since this CPU is going 'away' for a while, fold any nr_active delta
 * we might have.
 *
 * Any delta accumulated since the last fold is accounted to the global
 * calc_load_tasks so the load average doesn't see phantom tasks on a
 * dead CPU.
 */
5239static void calc_load_migrate(struct rq *rq)
5240{
5241 long delta = calc_load_fold_active(rq);
5242 if (delta)
5243 atomic_long_add(delta, &calc_load_tasks);
5244}
5245
5246static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
5247{
5248}
5249
5250static const struct sched_class fake_sched_class = {
5251 .put_prev_task = put_prev_task_fake,
5252};
5253
5254static struct task_struct fake_task = {
	/*
	 * Avoid pull_{rt,dl}_task()
	 */
5258 .prio = MAX_PRIO + 1,
5259 .sched_class = &fake_sched_class,
5260};
5261
/*
 * Migrate all tasks from the rq, sleeping tasks will be migrated by
 * try_to_wake_up()->select_task_rq().
 *
 * Called with rq->lock held even though we'er in stop_machine() and
 * there's no concurrency possible, we hold the required locks anyway
 * because of lock validation efforts.
 */
5270static void migrate_tasks(struct rq *dead_rq)
5271{
5272 struct rq *rq = dead_rq;
5273 struct task_struct *next, *stop = rq->stop;
5274 int dest_cpu;
5275
	/*
	 * Fudge the rq selection such that the below task selection loop
	 * doesn't get stuck on the currently eligible stop task.
	 *
	 * We're currently inside stop_machine() and the cpu is either stuck
	 * in the stop_machine_cpu_stop() loop, or we're executing this code,
	 * either way we should never end up calling schedule() until we're
	 * done here.
	 */
5285 rq->stop = NULL;
5286
	/*
	 * put_prev_task() and pick_next_task() sched
	 * class method both need to have an up-to-date
	 * value of rq->clock[_task]
	 */
5292 update_rq_clock(rq);
5293
5294 for (;;) {
		/*
		 * There's this thread running, bail when that's the only
		 * remaining thread.
		 */
5299 if (rq->nr_running == 1)
5300 break;
5301
		/*
		 * pick_next_task assumes pinned rq->lock.
		 */
5305 lockdep_pin_lock(&rq->lock);
5306 next = pick_next_task(rq, &fake_task);
5307 BUG_ON(!next);
5308 next->sched_class->put_prev_task(rq, next);
5309
		/*
		 * Rules for changing task_struct::cpus_allowed are holding
		 * both pi_lock and rq->lock, such that holding either
		 * stabilizes the mask.
		 *
		 * Drop rq->lock is not quite as disastrous as it usually is
		 * because !cpu_active at this point, which means load-balance
		 * will not interfere. Also, stop-machine.
		 */
5319 lockdep_unpin_lock(&rq->lock);
5320 raw_spin_unlock(&rq->lock);
5321 raw_spin_lock(&next->pi_lock);
5322 raw_spin_lock(&rq->lock);
5323
		/*
		 * Since we're inside stop-machine, _nothing_ should have
		 * changed the task, WARN if weird stuff happened, because in
		 * that case the above rq->lock drop is a fail too.
		 */
5329 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
5330 raw_spin_unlock(&next->pi_lock);
5331 continue;
5332 }
5333
		/* Find suitable destination for @next, with force if needed. */
5335 dest_cpu = select_fallback_rq(dead_rq->cpu, next);
5336
5337 rq = __migrate_task(rq, next, dest_cpu);
5338 if (rq != dead_rq) {
5339 raw_spin_unlock(&rq->lock);
5340 rq = dead_rq;
5341 raw_spin_lock(&rq->lock);
5342 }
5343 raw_spin_unlock(&next->pi_lock);
5344 }
5345
5346 rq->stop = stop;
5347}
5348#endif
5349
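/* Mark the runqueue online in its root domain and notify the classes. */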
5350static void set_rq_online(struct rq *rq)
5351{
5352 if (!rq->online) {
5353 const struct sched_class *class;
5354
5355 cpumask_set_cpu(rq->cpu, rq->rd->online);
5356 rq->online = 1;
5357
5358 for_each_class(class) {
5359 if (class->rq_online)
5360 class->rq_online(rq);
5361 }
5362 }
5363}
5364
5365static void set_rq_offline(struct rq *rq)
5366{
5367 if (rq->online) {
5368 const struct sched_class *class;
5369
5370 for_each_class(class) {
5371 if (class->rq_offline)
5372 class->rq_offline(rq);
5373 }
5374
5375 cpumask_clear_cpu(rq->cpu, rq->rd->online);
5376 rq->online = 0;
5377 }
5378}
5379
/*
 * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
5384static int
5385migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5386{
5387 int cpu = (long)hcpu;
5388 unsigned long flags;
5389 struct rq *rq = cpu_rq(cpu);
5390
5391 switch (action & ~CPU_TASKS_FROZEN) {
5392
5393 case CPU_UP_PREPARE:
5394 rq->calc_load_update = calc_load_update;
5395 account_reset_rq(rq);
5396 break;
5397
5398 case CPU_ONLINE:
		/* Update our root-domain */
5400 raw_spin_lock_irqsave(&rq->lock, flags);
5401 if (rq->rd) {
5402 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5403
5404 set_rq_online(rq);
5405 }
5406 raw_spin_unlock_irqrestore(&rq->lock, flags);
5407 break;
5408
5409#ifdef CONFIG_HOTPLUG_CPU
5410 case CPU_DYING:
5411 sched_ttwu_pending();
		/* Update our root-domain */
5413 raw_spin_lock_irqsave(&rq->lock, flags);
5414 if (rq->rd) {
5415 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5416 set_rq_offline(rq);
5417 }
5418 migrate_tasks(rq);
5419 BUG_ON(rq->nr_running != 1);
5420 raw_spin_unlock_irqrestore(&rq->lock, flags);
5421 break;
5422
5423 case CPU_DEAD:
5424 calc_load_migrate(rq);
5425 break;
5426#endif
5427 }
5428
5429 update_max_interval();
5430
5431 return NOTIFY_OK;
5432}
5433
/*
 * Register at high priority so that task migration (migrate_all_tasks)
 * happens before everything else.  This has to be lower priority than
 * the notifier in the perf_event subsystem, though.
 */
5439static struct notifier_block migration_notifier = {
5440 .notifier_call = migration_call,
5441 .priority = CPU_PRI_MIGRATION,
5442};
5443
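/* Record this CPU's sched clock as the runqueue's age stamp. */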
5444static void set_cpu_rq_start_time(void)
5445{
5446 int cpu = smp_processor_id();
5447 struct rq *rq = cpu_rq(cpu);
5448 rq->age_stamp = sched_clock_cpu(cpu);
5449}
5450
5451static int sched_cpu_active(struct notifier_block *nfb,
5452 unsigned long action, void *hcpu)
5453{
5454 int cpu = (long)hcpu;
5455
5456 switch (action & ~CPU_TASKS_FROZEN) {
5457 case CPU_STARTING:
5458 set_cpu_rq_start_time();
5459 return NOTIFY_OK;
5460
5461 case CPU_DOWN_FAILED:
5462 set_cpu_active(cpu, true);
5463 return NOTIFY_OK;
5464
5465 default:
5466 return NOTIFY_DONE;
5467 }
5468}
5469
5470static int sched_cpu_inactive(struct notifier_block *nfb,
5471 unsigned long action, void *hcpu)
5472{
5473 switch (action & ~CPU_TASKS_FROZEN) {
5474 case CPU_DOWN_PREPARE:
5475 set_cpu_active((long)hcpu, false);
5476 return NOTIFY_OK;
5477 default:
5478 return NOTIFY_DONE;
5479 }
5480}
5481
5482static int __init migration_init(void)
5483{
5484 void *cpu = (void *)(long)smp_processor_id();
5485 int err;
5486
	/* Initialize migration for the boot CPU */
5488 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5489 BUG_ON(err == NOTIFY_BAD);
5490 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5491 register_cpu_notifier(&migration_notifier);
5492
	/* Register cpu active notifiers */
5494 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5495 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5496
5497 return 0;
5498}
5499early_initcall(migration_init);
5500
5501static cpumask_var_t sched_domains_tmpmask;
5502
5503#ifdef CONFIG_SCHED_DEBUG
5504
5505static __read_mostly int sched_debug_enabled;
5506
5507static int __init sched_debug_setup(char *str)
5508{
5509 sched_debug_enabled = 1;
5510
5511 return 0;
5512}
5513early_param("sched_debug", sched_debug_setup);
5514
5515static inline bool sched_debug(void)
5516{
5517 return sched_debug_enabled;
5518}
5519
5520static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5521 struct cpumask *groupmask)
5522{
5523 struct sched_group *group = sd->groups;
5524
5525 cpumask_clear(groupmask);
5526
5527 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5528
5529 if (!(sd->flags & SD_LOAD_BALANCE)) {
5530 printk("does not load-balance\n");
5531 if (sd->parent)
5532 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5533 " has parent");
5534 return -1;
5535 }
5536
5537 printk(KERN_CONT "span %*pbl level %s\n",
5538 cpumask_pr_args(sched_domain_span(sd)), sd->name);
5539
5540 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
5541 printk(KERN_ERR "ERROR: domain->span does not contain "
5542 "CPU%d\n", cpu);
5543 }
5544 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
5545 printk(KERN_ERR "ERROR: domain->groups does not contain"
5546 " CPU%d\n", cpu);
5547 }
5548
5549 printk(KERN_DEBUG "%*s groups:", level + 1, "");
5550 do {
5551 if (!group) {
5552 printk("\n");
5553 printk(KERN_ERR "ERROR: group is NULL\n");
5554 break;
5555 }
5556
5557 if (!cpumask_weight(sched_group_cpus(group))) {
5558 printk(KERN_CONT "\n");
5559 printk(KERN_ERR "ERROR: empty group\n");
5560 break;
5561 }
5562
5563 if (!(sd->flags & SD_OVERLAP) &&
5564 cpumask_intersects(groupmask, sched_group_cpus(group))) {
5565 printk(KERN_CONT "\n");
5566 printk(KERN_ERR "ERROR: repeated CPUs\n");
5567 break;
5568 }
5569
5570 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
5571
5572 printk(KERN_CONT " %*pbl",
5573 cpumask_pr_args(sched_group_cpus(group)));
5574 if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
5575 printk(KERN_CONT " (cpu_capacity = %d)",
5576 group->sgc->capacity);
5577 }
5578
5579 group = group->next;
5580 } while (group != sd->groups);
5581 printk(KERN_CONT "\n");
5582
5583 if (!cpumask_equal(sched_domain_span(sd), groupmask))
5584 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
5585
5586 if (sd->parent &&
5587 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
5588 printk(KERN_ERR "ERROR: parent span is not a superset "
5589 "of domain->span\n");
5590 return 0;
5591}
5592
5593static void sched_domain_debug(struct sched_domain *sd, int cpu)
5594{
5595 int level = 0;
5596
5597 if (!sched_debug_enabled)
5598 return;
5599
5600 if (!sd) {
5601 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5602 return;
5603 }
5604
5605 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5606
5607 for (;;) {
5608 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
5609 break;
5610 level++;
5611 sd = sd->parent;
5612 if (!sd)
5613 break;
5614 }
5615}
5616#else
5617# define sched_domain_debug(sd, cpu) do { } while (0)
5618static inline bool sched_debug(void)
5619{
5620 return false;
5621}
5622#endif
5623
5624static int sd_degenerate(struct sched_domain *sd)
5625{
5626 if (cpumask_weight(sched_domain_span(sd)) == 1)
5627 return 1;
5628
	/* Following flags need at least 2 groups */
5630 if (sd->flags & (SD_LOAD_BALANCE |
5631 SD_BALANCE_NEWIDLE |
5632 SD_BALANCE_FORK |
5633 SD_BALANCE_EXEC |
5634 SD_SHARE_CPUCAPACITY |
5635 SD_SHARE_PKG_RESOURCES |
5636 SD_SHARE_POWERDOMAIN)) {
5637 if (sd->groups != sd->groups->next)
5638 return 0;
5639 }
5640
	/* Following flags don't use groups */
5642 if (sd->flags & (SD_WAKE_AFFINE))
5643 return 0;
5644
5645 return 1;
5646}
5647
5648static int
5649sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5650{
5651 unsigned long cflags = sd->flags, pflags = parent->flags;
5652
5653 if (sd_degenerate(parent))
5654 return 1;
5655
5656 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
5657 return 0;
5658
	/* Flags needing groups don't count if only 1 group in parent */
5660 if (parent->groups == parent->groups->next) {
5661 pflags &= ~(SD_LOAD_BALANCE |
5662 SD_BALANCE_NEWIDLE |
5663 SD_BALANCE_FORK |
5664 SD_BALANCE_EXEC |
5665 SD_SHARE_CPUCAPACITY |
5666 SD_SHARE_PKG_RESOURCES |
5667 SD_PREFER_SIBLING |
5668 SD_SHARE_POWERDOMAIN);
5669 if (nr_node_ids == 1)
5670 pflags &= ~SD_SERIALIZE;
5671 }
5672 if (~cflags & pflags)
5673 return 0;
5674
5675 return 1;
5676}
5677
5678static void free_rootdomain(struct rcu_head *rcu)
5679{
5680 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
5681
5682 cpupri_cleanup(&rd->cpupri);
5683 cpudl_cleanup(&rd->cpudl);
5684 free_cpumask_var(rd->dlo_mask);
5685 free_cpumask_var(rd->rto_mask);
5686 free_cpumask_var(rd->online);
5687 free_cpumask_var(rd->span);
5688 kfree(rd);
5689}
5690
5691static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5692{
5693 struct root_domain *old_rd = NULL;
5694 unsigned long flags;
5695
5696 raw_spin_lock_irqsave(&rq->lock, flags);
5697
5698 if (rq->rd) {
5699 old_rd = rq->rd;
5700
5701 if (cpumask_test_cpu(rq->cpu, old_rd->online))
5702 set_rq_offline(rq);
5703
5704 cpumask_clear_cpu(rq->cpu, old_rd->span);
5705
		/*
		 * If we dont want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
5711 if (!atomic_dec_and_test(&old_rd->refcount))
5712 old_rd = NULL;
5713 }
5714
5715 atomic_inc(&rd->refcount);
5716 rq->rd = rd;
5717
5718 cpumask_set_cpu(rq->cpu, rd->span);
5719 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
5720 set_rq_online(rq);
5721
5722 raw_spin_unlock_irqrestore(&rq->lock, flags);
5723
5724 if (old_rd)
5725 call_rcu_sched(&old_rd->rcu, free_rootdomain);
5726}
5727
5728static int init_rootdomain(struct root_domain *rd)
5729{
5730 memset(rd, 0, sizeof(*rd));
5731
5732 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
5733 goto out;
5734 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
5735 goto free_span;
5736 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
5737 goto free_online;
5738 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5739 goto free_dlo_mask;
5740
5741 init_dl_bw(&rd->dl_bw);
5742 if (cpudl_init(&rd->cpudl) != 0)
5743 goto free_dlo_mask;
5744
5745 if (cpupri_init(&rd->cpupri) != 0)
5746 goto free_rto_mask;
5747 return 0;
5748
5749free_rto_mask:
5750 free_cpumask_var(rd->rto_mask);
5751free_dlo_mask:
5752 free_cpumask_var(rd->dlo_mask);
5753free_online:
5754 free_cpumask_var(rd->online);
5755free_span:
5756 free_cpumask_var(rd->span);
5757out:
5758 return -ENOMEM;
5759}
5760
/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
5765struct root_domain def_root_domain;
5766
5767static void init_defrootdomain(void)
5768{
5769 init_rootdomain(&def_root_domain);
5770
5771 atomic_set(&def_root_domain.refcount, 1);
5772}
5773
5774static struct root_domain *alloc_rootdomain(void)
5775{
5776 struct root_domain *rd;
5777
5778 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5779 if (!rd)
5780 return NULL;
5781
5782 if (init_rootdomain(rd) != 0) {
5783 kfree(rd);
5784 return NULL;
5785 }
5786
5787 return rd;
5788}
5789
5790static void free_sched_groups(struct sched_group *sg, int free_sgc)
5791{
5792 struct sched_group *tmp, *first;
5793
5794 if (!sg)
5795 return;
5796
5797 first = sg;
5798 do {
5799 tmp = sg->next;
5800
5801 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
5802 kfree(sg->sgc);
5803
5804 kfree(sg);
5805 sg = tmp;
5806 } while (sg != first);
5807}
5808
5809static void free_sched_domain(struct rcu_head *rcu)
5810{
5811 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
5812
	/*
	 * If its an overlapping domain it has private groups, iterate and
	 * nuke them all.
	 */
5817 if (sd->flags & SD_OVERLAP) {
5818 free_sched_groups(sd->groups, 1);
5819 } else if (atomic_dec_and_test(&sd->groups->ref)) {
5820 kfree(sd->groups->sgc);
5821 kfree(sd->groups);
5822 }
5823 kfree(sd);
5824}
5825
5826static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5827{
5828 call_rcu(&sd->rcu, free_sched_domain);
5829}
5830
5831static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5832{
5833 for (; sd; sd = sd->parent)
5834 destroy_sched_domain(sd, cpu);
5835}
5836
/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
 * allows us to avoid some pointer chasing select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first cpu number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two cpus are in the same cache domain, see cpus_share_cache().
 */
5846DEFINE_PER_CPU(struct sched_domain *, sd_llc);
5847DEFINE_PER_CPU(int, sd_llc_size);
5848DEFINE_PER_CPU(int, sd_llc_id);
5849DEFINE_PER_CPU(struct sched_domain *, sd_numa);
5850DEFINE_PER_CPU(struct sched_domain *, sd_busy);
5851DEFINE_PER_CPU(struct sched_domain *, sd_asym);
5852
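/*
 * Cache the highest SD_SHARE_PKG_RESOURCES (LLC) domain, its span size
 * and id, plus the NUMA and ASYM_PACKING domains, in the per-cpu
 * pointers above.
 */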
5853static void update_top_cache_domain(int cpu)
5854{
5855 struct sched_domain *sd;
5856 struct sched_domain *busy_sd = NULL;
5857 int id = cpu;
5858 int size = 1;
5859
5860 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
5861 if (sd) {
5862 id = cpumask_first(sched_domain_span(sd));
5863 size = cpumask_weight(sched_domain_span(sd));
5864 busy_sd = sd->parent;
5865 }
5866 rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
5867
5868 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
5869 per_cpu(sd_llc_size, cpu) = size;
5870 per_cpu(sd_llc_id, cpu) = id;
5871
5872 sd = lowest_flag_domain(cpu, SD_NUMA);
5873 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
5874
5875 sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
5876 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
5877}
5878
/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
5883static void
5884cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
5885{
5886 struct rq *rq = cpu_rq(cpu);
5887 struct sched_domain *tmp;
5888
	/* Remove the sched domains which do not contribute to scheduling. */
5890 for (tmp = sd; tmp; ) {
5891 struct sched_domain *parent = tmp->parent;
5892 if (!parent)
5893 break;
5894
5895 if (sd_parent_degenerate(tmp, parent)) {
5896 tmp->parent = parent->parent;
5897 if (parent->parent)
5898 parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
5904 if (parent->flags & SD_PREFER_SIBLING)
5905 tmp->flags |= SD_PREFER_SIBLING;
5906 destroy_sched_domain(parent, cpu);
5907 } else
5908 tmp = tmp->parent;
5909 }
5910
5911 if (sd && sd_degenerate(sd)) {
5912 tmp = sd;
5913 sd = sd->parent;
5914 destroy_sched_domain(tmp, cpu);
5915 if (sd)
5916 sd->child = NULL;
5917 }
5918
5919 sched_domain_debug(sd, cpu);
5920
5921 rq_attach_root(rq, rd);
5922 tmp = rq->sd;
5923 rcu_assign_pointer(rq->sd, sd);
5924 destroy_sched_domains(tmp, cpu);
5925
5926 update_top_cache_domain(cpu);
5927}
5928
/* Setup the mask of cpus configured for isolated domains */
5930static int __init isolated_cpu_setup(char *str)
5931{
5932 int ret;
5933
5934 alloc_bootmem_cpumask_var(&cpu_isolated_map);
5935 ret = cpulist_parse(str, cpu_isolated_map);
5936 if (ret) {
5937 pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids);
5938 return 0;
5939 }
5940 return 1;
5941}
5942__setup("isolcpus=", isolated_cpu_setup);
5943
5944struct s_data {
5945 struct sched_domain ** __percpu sd;
5946 struct root_domain *rd;
5947};
5948
5949enum s_alloc {
5950 sa_rootdomain,
5951 sa_sd,
5952 sa_sd_storage,
5953 sa_none,
5954};
5955
/*
 * Build an iteration mask that can exclude certain CPUs from the upwards
 * domain traversal.
 *
 * Asymmetric node setups can result in situations where the domain tree is of
 * unequal depth, make sure to skip domains that already cover the entire
 * range.
 *
 * In that case build_sched_domains() will have terminated the iteration early
 * and our sibling sd spans will be empty. Domains should always include the
 * cpu they're built on, so check that.
 *
 */
5969static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
5970{
5971 const struct cpumask *span = sched_domain_span(sd);
5972 struct sd_data *sdd = sd->private;
5973 struct sched_domain *sibling;
5974 int i;
5975
5976 for_each_cpu(i, span) {
5977 sibling = *per_cpu_ptr(sdd->sd, i);
5978 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
5979 continue;
5980
5981 cpumask_set_cpu(i, sched_group_mask(sg));
5982 }
5983}
5984
/*
 * Return the canonical balance cpu for this group, this is the first cpu
 * of this group that's also in the iteration mask.
 */
5989int group_balance_cpu(struct sched_group *sg)
5990{
5991 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
5992}
5993
5994static int
5995build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5996{
5997 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5998 const struct cpumask *span = sched_domain_span(sd);
5999 struct cpumask *covered = sched_domains_tmpmask;
6000 struct sd_data *sdd = sd->private;
6001 struct sched_domain *sibling;
6002 int i;
6003
6004 cpumask_clear(covered);
6005
6006 for_each_cpu(i, span) {
6007 struct cpumask *sg_span;
6008
6009 if (cpumask_test_cpu(i, covered))
6010 continue;
6011
6012 sibling = *per_cpu_ptr(sdd->sd, i);
6013
		/* See the comment above build_group_mask(). */
6015 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6016 continue;
6017
6018 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6019 GFP_KERNEL, cpu_to_node(cpu));
6020
6021 if (!sg)
6022 goto fail;
6023
6024 sg_span = sched_group_cpus(sg);
6025 if (sibling->child)
6026 cpumask_copy(sg_span, sched_domain_span(sibling->child));
6027 else
6028 cpumask_set_cpu(i, sg_span);
6029
6030 cpumask_or(covered, covered, sg_span);
6031
6032 sg->sgc = *per_cpu_ptr(sdd->sgc, i);
6033 if (atomic_inc_return(&sg->sgc->ref) == 1)
6034 build_group_mask(sd, sg);
6035
		/*
		 * Initialize sgc->capacity such that even if we mess up the
		 * domains and no possible iteration will get us here, we won't
		 * die on a /0 trap.
		 */
6041 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
6042
		/*
		 * Make sure the first group of this domain contains the
		 * canonical balance cpu. Otherwise the sched_domain iteration
		 * breaks. See update_sg_lb_stats().
		 */
6048 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
6049 group_balance_cpu(sg) == cpu)
6050 groups = sg;
6051
6052 if (!first)
6053 first = sg;
6054 if (last)
6055 last->next = sg;
6056 last = sg;
6057 last->next = first;
6058 }
6059 sd->groups = groups;
6060
6061 return 0;
6062
6063fail:
6064 free_sched_groups(first, 0);
6065
6066 return -ENOMEM;
6067}
6068
6069static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
6070{
6071 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6072 struct sched_domain *child = sd->child;
6073
6074 if (child)
6075 cpu = cpumask_first(sched_domain_span(child));
6076
6077 if (sg) {
6078 *sg = *per_cpu_ptr(sdd->sg, cpu);
6079 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
6080 atomic_set(&(*sg)->sgc->ref, 1);
6081 }
6082
6083 return cpu;
6084}
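
/*
 * Sketch of the redirection above (hypothetical layout): for a DIE-level
 * domain seen from CPU 5 whose child MC domain spans CPUs 4-7, the
 * child's first cpu is 4, so every CPU of that MC span shares the single
 * sched_group and sched_group_capacity allocated for CPU 4.
 */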
6085
/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask
 * correctly, and ->cpu_capacity to 0.
 *
 * Assumes the sched_domain tree is fully constructed.
 */
6093static int
6094build_sched_groups(struct sched_domain *sd, int cpu)
6095{
6096 struct sched_group *first = NULL, *last = NULL;
6097 struct sd_data *sdd = sd->private;
6098 const struct cpumask *span = sched_domain_span(sd);
6099 struct cpumask *covered;
6100 int i;
6101
6102 get_group(cpu, sdd, &sd->groups);
6103 atomic_inc(&sd->groups->ref);
6104
6105 if (cpu != cpumask_first(span))
6106 return 0;
6107
6108 lockdep_assert_held(&sched_domains_mutex);
6109 covered = sched_domains_tmpmask;
6110
6111 cpumask_clear(covered);
6112
6113 for_each_cpu(i, span) {
6114 struct sched_group *sg;
6115 int group, j;
6116
6117 if (cpumask_test_cpu(i, covered))
6118 continue;
6119
6120 group = get_group(i, sdd, &sg);
6121 cpumask_setall(sched_group_mask(sg));
6122
6123 for_each_cpu(j, span) {
6124 if (get_group(j, sdd, NULL) != group)
6125 continue;
6126
6127 cpumask_set_cpu(j, covered);
6128 cpumask_set_cpu(j, sched_group_cpus(sg));
6129 }
6130
6131 if (!first)
6132 first = sg;
6133 if (last)
6134 last->next = sg;
6135 last = sg;
6136 }
6137 last->next = first;
6138
6139 return 0;
6140}
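
/*
 * Example of the result (a sketch, not a specific machine): for an MC
 * domain spanning CPUs 0-3 whose child SMT domains pair {0,1} and {2,3},
 * the loop above links two groups into a circle:
 *
 *	sd->groups: {0,1} -> {2,3} -> {0,1} -> ...
 *
 * Only the first cpu of the span builds the list; every other cpu merely
 * takes a reference on its own group via get_group().
 */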
6141
/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of a sched group, which is used
 * while distributing load between the sched groups of a sched domain.
 * Typically cpu_capacity is the same for all groups in a sched domain,
 * unless there are asymmetries in the topology; if there are, the group
 * with more cpu_capacity will pick up more load than the group with
 * less cpu_capacity.
 */
6152static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
6153{
6154 struct sched_group *sg = sd->groups;
6155
6156 WARN_ON(!sg);
6157
6158 do {
6159 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6160 sg = sg->next;
6161 } while (sg != sd->groups);
6162
6163 if (cpu != group_balance_cpu(sg))
6164 return;
6165
6166 update_group_capacity(sd, cpu);
6167 atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
6168}
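
/*
 * Rough arithmetic sketch: with SCHED_CAPACITY_SCALE == 1024, a fully
 * symmetric group of 4 cpus ends up with group_weight == 4 and, once
 * update_group_capacity() has run, a capacity of about 4 * 1024 == 4096
 * (less whatever is stolen by RT/IRQ pressure elsewhere).
 */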
6169
/*
 * Initializers for schedule domains.
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains().
 */
6175static int default_relax_domain_level = -1;
6176int sched_domain_level_max;
6177
6178static int __init setup_relax_domain_level(char *str)
6179{
6180 if (kstrtoint(str, 0, &default_relax_domain_level))
6181 pr_warn("Unable to set relax_domain_level\n");
6182
6183 return 1;
6184}
6185__setup("relax_domain_level=", setup_relax_domain_level);
6186
6187static void set_domain_attribute(struct sched_domain *sd,
6188 struct sched_domain_attr *attr)
6189{
6190 int request;
6191
6192 if (!attr || attr->relax_domain_level < 0) {
6193 if (default_relax_domain_level < 0)
6194 return;
6195 else
6196 request = default_relax_domain_level;
6197 } else
6198 request = attr->relax_domain_level;
6199 if (request < sd->level) {
		/* turn off idle balance on this domain */
6201 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6202 } else {
		/* turn on idle balance on this domain */
6204 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6205 }
6206}
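
/*
 * Example: booting with "relax_domain_level=2" makes request == 2, so
 * domains at level <= 2 get SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE set and
 * higher-level (wider) domains get them cleared; wake/newidle balancing
 * is thus "relaxed" to only operate close to the waking cpu.
 */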
6207
6208static void __sdt_free(const struct cpumask *cpu_map);
6209static int __sdt_alloc(const struct cpumask *cpu_map);
6210
6211static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6212 const struct cpumask *cpu_map)
6213{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu);
		/* fall through */
	case sa_sd:
		free_percpu(d->sd);
		/* fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map);
		/* fall through */
	case sa_none:
		break;
	}
6225}
6226
6227static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6228 const struct cpumask *cpu_map)
6229{
6230 memset(d, 0, sizeof(*d));
6231
6232 if (__sdt_alloc(cpu_map))
6233 return sa_sd_storage;
6234 d->sd = alloc_percpu(struct sched_domain *);
6235 if (!d->sd)
6236 return sa_sd_storage;
6237 d->rd = alloc_rootdomain();
6238 if (!d->rd)
6239 return sa_sd;
6240 return sa_rootdomain;
6241}
6242
/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
6248static void claim_allocations(int cpu, struct sched_domain *sd)
6249{
6250 struct sd_data *sdd = sd->private;
6251
6252 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6253 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6254
6255 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
6256 *per_cpu_ptr(sdd->sg, cpu) = NULL;
6257
6258 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
6259 *per_cpu_ptr(sdd->sgc, cpu) = NULL;
6260}
6261
6262#ifdef CONFIG_NUMA
6263static int sched_domains_numa_levels;
6264enum numa_topology_type sched_numa_topology_type;
6265static int *sched_domains_numa_distance;
6266int sched_max_numa_distance;
6267static struct cpumask ***sched_domains_numa_masks;
6268static int sched_domains_curr_level;
6269#endif
6270
/*
 * SD_flags allowed in topology descriptions.
 *
 * SD_SHARE_CPUCAPACITY   - describes SMT topologies
 * SD_SHARE_PKG_RESOURCES - describes shared caches
 * SD_NUMA                - describes NUMA topologies
 * SD_SHARE_POWERDOMAIN   - describes shared power domains
 *
 * Odd one out:
 * SD_ASYM_PACKING        - describes SMT quirks
 */
6282#define TOPOLOGY_SD_FLAGS \
6283 (SD_SHARE_CPUCAPACITY | \
6284 SD_SHARE_PKG_RESOURCES | \
6285 SD_NUMA | \
6286 SD_ASYM_PACKING | \
6287 SD_SHARE_POWERDOMAIN)
6288
6289static struct sched_domain *
6290sd_init(struct sched_domain_topology_level *tl, int cpu)
6291{
6292 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
6293 int sd_weight, sd_flags = 0;
6294
6295#ifdef CONFIG_NUMA
	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
6299 sched_domains_curr_level = tl->numa_level;
6300#endif
6301
6302 sd_weight = cpumask_weight(tl->mask(cpu));
6303
6304 if (tl->sd_flags)
6305 sd_flags = (*tl->sd_flags)();
6306 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
6307 "wrong sd_flags in topology description\n"))
6308 sd_flags &= ~TOPOLOGY_SD_FLAGS;
6309
6310 *sd = (struct sched_domain){
6311 .min_interval = sd_weight,
6312 .max_interval = 2*sd_weight,
6313 .busy_factor = 32,
6314 .imbalance_pct = 125,
6315
6316 .cache_nice_tries = 0,
6317 .busy_idx = 0,
6318 .idle_idx = 0,
6319 .newidle_idx = 0,
6320 .wake_idx = 0,
6321 .forkexec_idx = 0,
6322
6323 .flags = 1*SD_LOAD_BALANCE
6324 | 1*SD_BALANCE_NEWIDLE
6325 | 1*SD_BALANCE_EXEC
6326 | 1*SD_BALANCE_FORK
6327 | 0*SD_BALANCE_WAKE
6328 | 1*SD_WAKE_AFFINE
6329 | 0*SD_SHARE_CPUCAPACITY
6330 | 0*SD_SHARE_PKG_RESOURCES
6331 | 0*SD_SERIALIZE
6332 | 0*SD_PREFER_SIBLING
6333 | 0*SD_NUMA
6334 | sd_flags
6335 ,
6336
6337 .last_balance = jiffies,
6338 .balance_interval = sd_weight,
6339 .smt_gain = 0,
6340 .max_newidle_lb_cost = 0,
6341 .next_decay_max_lb_cost = jiffies,
6342#ifdef CONFIG_SCHED_DEBUG
6343 .name = tl->name,
6344#endif
6345 };
6346
	/*
	 * Convert topological properties into behaviour.
	 */
6351 if (sd->flags & SD_SHARE_CPUCAPACITY) {
6352 sd->flags |= SD_PREFER_SIBLING;
6353 sd->imbalance_pct = 110;
6354 sd->smt_gain = 1178;
6355
6356 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
6357 sd->imbalance_pct = 117;
6358 sd->cache_nice_tries = 1;
6359 sd->busy_idx = 2;
6360
6361#ifdef CONFIG_NUMA
6362 } else if (sd->flags & SD_NUMA) {
6363 sd->cache_nice_tries = 2;
6364 sd->busy_idx = 3;
6365 sd->idle_idx = 2;
6366
6367 sd->flags |= SD_SERIALIZE;
6368 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
6369 sd->flags &= ~(SD_BALANCE_EXEC |
6370 SD_BALANCE_FORK |
6371 SD_WAKE_AFFINE);
6372 }
6373
6374#endif
6375 } else {
6376 sd->flags |= SD_PREFER_SIBLING;
6377 sd->cache_nice_tries = 1;
6378 sd->busy_idx = 2;
6379 sd->idle_idx = 1;
6380 }
6381
6382 sd->private = &tl->data;
6383
6384 return sd;
6385}
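
/*
 * Worked example of the conversion above: an SMT level supplies
 * SD_SHARE_CPUCAPACITY via its ->sd_flags() callback, so the resulting
 * domain gets SD_PREFER_SIBLING, imbalance_pct == 110 (tolerate only a
 * 10% imbalance between hardware threads) and smt_gain == 1178, i.e. a
 * core with two busy siblings is rated ~15% above a single thread
 * (1178/1024 ~= 1.15).
 */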
6386
/*
 * Topology list, bottom-up.
 */
6390static struct sched_domain_topology_level default_topology[] = {
6391#ifdef CONFIG_SCHED_SMT
6392 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
6393#endif
6394#ifdef CONFIG_SCHED_MC
6395 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
6396#endif
6397 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
6398 { NULL, },
6399};
6400
6401static struct sched_domain_topology_level *sched_domain_topology =
6402 default_topology;
6403
6404#define for_each_sd_topology(tl) \
6405 for (tl = sched_domain_topology; tl->mask; tl++)
6406
6407void set_sched_topology(struct sched_domain_topology_level *tl)
6408{
6409 sched_domain_topology = tl;
6410}
6411
6412#ifdef CONFIG_NUMA
6413
6414static const struct cpumask *sd_numa_mask(int cpu)
6415{
6416 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6417}
6418
6419static void sched_numa_warn(const char *str)
6420{
6421 static int done = false;
6422 int i,j;
6423
6424 if (done)
6425 return;
6426
6427 done = true;
6428
6429 printk(KERN_WARNING "ERROR: %s\n\n", str);
6430
6431 for (i = 0; i < nr_node_ids; i++) {
6432 printk(KERN_WARNING " ");
6433 for (j = 0; j < nr_node_ids; j++)
6434 printk(KERN_CONT "%02d ", node_distance(i,j));
6435 printk(KERN_CONT "\n");
6436 }
6437 printk(KERN_WARNING "\n");
6438}
6439
6440bool find_numa_distance(int distance)
6441{
6442 int i;
6443
6444 if (distance == node_distance(0, 0))
6445 return true;
6446
6447 for (i = 0; i < sched_domains_numa_levels; i++) {
6448 if (sched_domains_numa_distance[i] == distance)
6449 return true;
6450 }
6451
6452 return false;
6453}
6454
/*
 * A system can have three types of NUMA topology:
 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
 * NUMA_BACKPLANE: nodes can reach other nodes only through backplane
 * controllers
 *
 * The difference between a glueless mesh topology and a backplane
 * topology lies in whether communication between not directly
 * connected nodes goes through intermediary nodes (where programs
 * could run), or through backplane controllers. This affects
 * placement of programs.
 *
 * The type of topology can be discerned with the following tests:
 * - If the maximum distance between any nodes is 1 hop, the system
 *   is directly connected.
 * - If for two nodes A and B, located N > 1 hops away from each other,
 *   there is an intermediary node C, which is < N hops away from both
 *   nodes A and B, the system is a glueless mesh.
 */
6474static void init_numa_topology_type(void)
6475{
6476 int a, b, c, n;
6477
6478 n = sched_max_numa_distance;
6479
6480 if (sched_domains_numa_levels <= 1) {
6481 sched_numa_topology_type = NUMA_DIRECT;
6482 return;
6483 }
6484
6485 for_each_online_node(a) {
6486 for_each_online_node(b) {
			/* Find two nodes furthest removed from each other. */
6488 if (node_distance(a, b) < n)
6489 continue;
6490
			/* Is there an intermediary node between a and b? */
6492 for_each_online_node(c) {
6493 if (node_distance(a, c) < n &&
6494 node_distance(b, c) < n) {
6495 sched_numa_topology_type =
6496 NUMA_GLUELESS_MESH;
6497 return;
6498 }
6499 }
6500
6501 sched_numa_topology_type = NUMA_BACKPLANE;
6502 return;
6503 }
6504 }
6505}
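
/*
 * Hypothetical distance tables to make the classification concrete:
 *
 *	direct:		mesh/backplane:
 *	10 20		10 20 30
 *	20 10		20 10 20
 *			30 20 10
 *
 * In the right-hand table nodes 0 and 2 sit at the maximum distance n;
 * node 1 is closer than n to both, so if node 1 is an online node that
 * can run tasks the topology is NUMA_GLUELESS_MESH, while without such
 * an intermediary it would be NUMA_BACKPLANE.
 */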
6506
6507static void sched_init_numa(void)
6508{
6509 int next_distance, curr_distance = node_distance(0, 0);
6510 struct sched_domain_topology_level *tl;
6511 int level = 0;
6512 int i, j, k;
6513
6514 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6515 if (!sched_domains_numa_distance)
6516 return;
6517
	/*
	 * O(nr_node_ids^2) deduplicating selection sort -- in order to find
	 * the unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
6525 next_distance = curr_distance;
6526 for (i = 0; i < nr_node_ids; i++) {
6527 for (j = 0; j < nr_node_ids; j++) {
6528 for (k = 0; k < nr_node_ids; k++) {
6529 int distance = node_distance(i, k);
6530
6531 if (distance > curr_distance &&
6532 (distance < next_distance ||
6533 next_distance == curr_distance))
6534 next_distance = distance;
6535
				/*
				 * While not a strong assumption it would be
				 * nice to know about cases where if node A is
				 * connected to B, B is not equally connected
				 * to A.
				 */
6541 if (sched_debug() && node_distance(k, i) != distance)
6542 sched_numa_warn("Node-distance not symmetric");
6543
6544 if (sched_debug() && i && !find_numa_distance(distance))
6545 sched_numa_warn("Node-0 not representative");
6546 }
6547 if (next_distance != curr_distance) {
6548 sched_domains_numa_distance[level++] = next_distance;
6549 sched_domains_numa_levels = level;
6550 curr_distance = next_distance;
6551 } else break;
6552 }

		/*
		 * In case of sched_debug() we verify the above assumption
		 * by scanning the distances from every node.
		 */
6557 if (!sched_debug())
6558 break;
6559 }
6560
6561 if (!level)
6562 return;
6563
	/*
	 * 'level' contains the number of unique distances, excluding the
	 * identity distance node_distance(i,i).
	 *
	 * The sched_domains_numa_distance[] array includes the actual
	 * distance numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If we fail to allocate memory for the sched_domains_numa_masks[][]
	 * array, it will contain fewer than 'level' members, which could be
	 * dangerous for code iterating it (such as cpu_numa_flags) during
	 * cpu on/offlining. It is set back to 'level' once the masks are
	 * fully built, at the end of this function.
	 */
6581 sched_domains_numa_levels = 0;
6582
6583 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
6584 if (!sched_domains_numa_masks)
6585 return;
6586
	/*
	 * Now for each level, construct a mask per node which contains all
	 * cpus of nodes that are that many hops away from us.
	 */
6591 for (i = 0; i < level; i++) {
6592 sched_domains_numa_masks[i] =
6593 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
6594 if (!sched_domains_numa_masks[i])
6595 return;
6596
6597 for (j = 0; j < nr_node_ids; j++) {
6598 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
6599 if (!mask)
6600 return;
6601
6602 sched_domains_numa_masks[i][j] = mask;
6603
6604 for_each_node(k) {
6605 if (node_distance(j, k) > sched_domains_numa_distance[i])
6606 continue;
6607
6608 cpumask_or(mask, mask, cpumask_of_node(k));
6609 }
6610 }
6611 }
6612
	/* Compute the size of the default (non-NUMA) topology */
6614 for (i = 0; sched_domain_topology[i].mask; i++);
6615
6616 tl = kzalloc((i + level + 1) *
6617 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
6618 if (!tl)
6619 return;
6620
	/*
	 * Copy the default topology bits..
	 */
6624 for (i = 0; sched_domain_topology[i].mask; i++)
6625 tl[i] = sched_domain_topology[i];
6626
	/*
	 * .. and append one topology level per unique NUMA distance.
	 */
6630 for (j = 0; j < level; i++, j++) {
6631 tl[i] = (struct sched_domain_topology_level){
6632 .mask = sd_numa_mask,
6633 .sd_flags = cpu_numa_flags,
6634 .flags = SDTL_OVERLAP,
6635 .numa_level = j,
6636 SD_INIT_NAME(NUMA)
6637 };
6638 }
6639
6640 sched_domain_topology = tl;
6641
6642 sched_domains_numa_levels = level;
6643 sched_max_numa_distance = sched_domains_numa_distance[level - 1];
6644
6645 init_numa_topology_type();
6646}
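
/*
 * End-to-end sketch for a hypothetical machine whose node_distance()
 * table holds the values {10, 20, 30} (10 being the local distance):
 * the deduplicating scan records sched_domains_numa_distance[] =
 * { 20, 30 } and level == 2; masks[0][j] then covers node j plus all
 * nodes within distance 20, masks[1][j] covers every node, two NUMA
 * topology levels are appended after the default ones, and
 * sched_max_numa_distance ends up as 30.
 */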
6647
6648static void sched_domains_numa_masks_set(int cpu)
6649{
6650 int i, j;
6651 int node = cpu_to_node(cpu);
6652
6653 for (i = 0; i < sched_domains_numa_levels; i++) {
6654 for (j = 0; j < nr_node_ids; j++) {
6655 if (node_distance(j, node) <= sched_domains_numa_distance[i])
6656 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
6657 }
6658 }
6659}
6660
6661static void sched_domains_numa_masks_clear(int cpu)
6662{
6663 int i, j;
6664 for (i = 0; i < sched_domains_numa_levels; i++) {
6665 for (j = 0; j < nr_node_ids; j++)
6666 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
6667 }
6668}
6669
/*
 * Update the sched_domains_numa_masks[level][node] array when cpus
 * come online or go offline.
 */
6674static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6675 unsigned long action,
6676 void *hcpu)
6677{
6678 int cpu = (long)hcpu;
6679
6680 switch (action & ~CPU_TASKS_FROZEN) {
6681 case CPU_ONLINE:
6682 sched_domains_numa_masks_set(cpu);
6683 break;
6684
6685 case CPU_DEAD:
6686 sched_domains_numa_masks_clear(cpu);
6687 break;
6688
6689 default:
6690 return NOTIFY_DONE;
6691 }
6692
6693 return NOTIFY_OK;
6694}
6695#else
6696static inline void sched_init_numa(void)
6697{
6698}
6699
6700static int sched_domains_numa_masks_update(struct notifier_block *nfb,
6701 unsigned long action,
6702 void *hcpu)
6703{
6704 return 0;
6705}
6706#endif
6707
6708static int __sdt_alloc(const struct cpumask *cpu_map)
6709{
6710 struct sched_domain_topology_level *tl;
6711 int j;
6712
6713 for_each_sd_topology(tl) {
6714 struct sd_data *sdd = &tl->data;
6715
6716 sdd->sd = alloc_percpu(struct sched_domain *);
6717 if (!sdd->sd)
6718 return -ENOMEM;
6719
6720 sdd->sg = alloc_percpu(struct sched_group *);
6721 if (!sdd->sg)
6722 return -ENOMEM;
6723
6724 sdd->sgc = alloc_percpu(struct sched_group_capacity *);
6725 if (!sdd->sgc)
6726 return -ENOMEM;
6727
6728 for_each_cpu(j, cpu_map) {
6729 struct sched_domain *sd;
6730 struct sched_group *sg;
6731 struct sched_group_capacity *sgc;
6732
6733 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6734 GFP_KERNEL, cpu_to_node(j));
6735 if (!sd)
6736 return -ENOMEM;
6737
6738 *per_cpu_ptr(sdd->sd, j) = sd;
6739
6740 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6741 GFP_KERNEL, cpu_to_node(j));
6742 if (!sg)
6743 return -ENOMEM;
6744
6745 sg->next = sg;
6746
6747 *per_cpu_ptr(sdd->sg, j) = sg;
6748
6749 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
6750 GFP_KERNEL, cpu_to_node(j));
6751 if (!sgc)
6752 return -ENOMEM;
6753
6754 *per_cpu_ptr(sdd->sgc, j) = sgc;
6755 }
6756 }
6757
6758 return 0;
6759}
6760
6761static void __sdt_free(const struct cpumask *cpu_map)
6762{
6763 struct sched_domain_topology_level *tl;
6764 int j;
6765
6766 for_each_sd_topology(tl) {
6767 struct sd_data *sdd = &tl->data;
6768
6769 for_each_cpu(j, cpu_map) {
6770 struct sched_domain *sd;
6771
6772 if (sdd->sd) {
6773 sd = *per_cpu_ptr(sdd->sd, j);
6774 if (sd && (sd->flags & SD_OVERLAP))
6775 free_sched_groups(sd->groups, 0);
6776 kfree(*per_cpu_ptr(sdd->sd, j));
6777 }
6778
6779 if (sdd->sg)
6780 kfree(*per_cpu_ptr(sdd->sg, j));
6781 if (sdd->sgc)
6782 kfree(*per_cpu_ptr(sdd->sgc, j));
6783 }
6784 free_percpu(sdd->sd);
6785 sdd->sd = NULL;
6786 free_percpu(sdd->sg);
6787 sdd->sg = NULL;
6788 free_percpu(sdd->sgc);
6789 sdd->sgc = NULL;
6790 }
6791}
6792
6793struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6794 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6795 struct sched_domain *child, int cpu)
6796{
6797 struct sched_domain *sd = sd_init(tl, cpu);
6798 if (!sd)
6799 return child;
6800
6801 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
6802 if (child) {
6803 sd->level = child->level + 1;
6804 sched_domain_level_max = max(sched_domain_level_max, sd->level);
6805 child->parent = sd;
6806 sd->child = child;
6807
6808 if (!cpumask_subset(sched_domain_span(child),
6809 sched_domain_span(sd))) {
6810 pr_err("BUG: arch topology borken\n");
6811#ifdef CONFIG_SCHED_DEBUG
6812 pr_err(" the %s domain not a subset of the %s domain\n",
6813 child->name, sd->name);
6814#endif

			/* Fixup, ensure @sd has at least @child cpus. */
6816 cpumask_or(sched_domain_span(sd),
6817 sched_domain_span(sd),
6818 sched_domain_span(child));
6819 }
6820
6821 }
6822 set_domain_attribute(sd, attr);
6823
6824 return sd;
6825}
6826
/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus.
 */
6831static int build_sched_domains(const struct cpumask *cpu_map,
6832 struct sched_domain_attr *attr)
6833{
6834 enum s_alloc alloc_state;
6835 struct sched_domain *sd;
6836 struct s_data d;
6837 int i, ret = -ENOMEM;
6838
6839 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6840 if (alloc_state != sa_rootdomain)
6841 goto error;
6842
	/* Set up domains for cpus specified by the cpu_map. */
6844 for_each_cpu(i, cpu_map) {
6845 struct sched_domain_topology_level *tl;
6846
6847 sd = NULL;
6848 for_each_sd_topology(tl) {
6849 sd = build_sched_domain(tl, cpu_map, attr, sd, i);
6850 if (tl == sched_domain_topology)
6851 *per_cpu_ptr(d.sd, i) = sd;
6852 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6853 sd->flags |= SD_OVERLAP;
6854 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6855 break;
6856 }
6857 }
6858
	/* Build the groups for the domains */
6860 for_each_cpu(i, cpu_map) {
6861 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6862 sd->span_weight = cpumask_weight(sched_domain_span(sd));
6863 if (sd->flags & SD_OVERLAP) {
6864 if (build_overlap_sched_groups(sd, i))
6865 goto error;
6866 } else {
6867 if (build_sched_groups(sd, i))
6868 goto error;
6869 }
6870 }
6871 }
6872
	/* Calculate CPU capacity for physical packages and nodes */
6874 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6875 if (!cpumask_test_cpu(i, cpu_map))
6876 continue;
6877
6878 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6879 claim_allocations(i, sd);
6880 init_sched_groups_capacity(i, sd);
6881 }
6882 }
6883
	/* Attach the domains */
6885 rcu_read_lock();
6886 for_each_cpu(i, cpu_map) {
6887 sd = *per_cpu_ptr(d.sd, i);
6888 cpu_attach_domain(sd, d.rd, i);
6889 }
6890 rcu_read_unlock();
6891
6892 ret = 0;
6893error:
6894 __free_domain_allocs(&d, alloc_state, cpu_map);
6895 return ret;
6896}
6897
static cpumask_var_t *doms_cur;	/* current sched domains */
static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
static struct sched_domain_attr *dattr_cur;
				/* attributes of custom domains in 'doms_cur' */
6901
/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumasks) fails, then fall back to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
6908static cpumask_var_t fallback_doms;
6909
/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * cpu core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
6915int __weak arch_update_cpu_topology(void)
6916{
6917 return 0;
6918}
6919
6920cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6921{
6922 int i;
6923 cpumask_var_t *doms;
6924
6925 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6926 if (!doms)
6927 return NULL;
6928 for (i = 0; i < ndoms; i++) {
6929 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6930 free_sched_domains(doms, i);
6931 return NULL;
6932 }
6933 }
6934 return doms;
6935}
6936
6937void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6938{
6939 unsigned int i;
6940 for (i = 0; i < ndoms; i++)
6941 free_cpumask_var(doms[i]);
6942 kfree(doms);
6943}
6944
/*
 * Set up scheduler domains and groups. Callers must hold the hotplug
 * lock. For now this just excludes isolated cpus, but could be used to
 * exclude other special cases in the future.
 */
6950static int init_sched_domains(const struct cpumask *cpu_map)
6951{
6952 int err;
6953
6954 arch_update_cpu_topology();
6955 ndoms_cur = 1;
6956 doms_cur = alloc_sched_domains(ndoms_cur);
6957 if (!doms_cur)
6958 doms_cur = &fallback_doms;
6959 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
6960 err = build_sched_domains(doms_cur[0], NULL);
6961 register_sched_domain_sysctl();
6962
6963 return err;
6964}
6965
/*
 * Detach sched domains from a group of cpus specified in cpu_map.
 * These cpus will now be attached to the NULL domain.
 */
6970static void detach_destroy_domains(const struct cpumask *cpu_map)
6971{
6972 int i;
6973
6974 rcu_read_lock();
6975 for_each_cpu(i, cpu_map)
6976 cpu_attach_domain(NULL, &def_root_domain, i);
6977 rcu_read_unlock();
6978}
6979
/* handle null as "default" */
6981static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6982 struct sched_domain_attr *new, int idx_new)
6983{
6984 struct sched_domain_attr tmp;
6985
	/* fast path */
6987 if (!new && !cur)
6988 return 1;
6989
6990 tmp = SD_ATTR_INIT;
6991 return !memcmp(cur ? (cur + idx_cur) : &tmp,
6992 new ? (new + idx_new) : &tmp,
6993 sizeof(struct sched_domain_attr));
6994}
6995
/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should set up one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domain and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains. This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held.
 */
7022void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
7023 struct sched_domain_attr *dattr_new)
7024{
7025 int i, j, n;
7026 int new_topology;
7027
7028 mutex_lock(&sched_domains_mutex);
7029
	/* always unregister in case we don't destroy any domains */
7031 unregister_sched_domain_sysctl();
7032
	/* Let architecture update cpu core mappings. */
7034 new_topology = arch_update_cpu_topology();
7035
7036 n = doms_new ? ndoms_new : 0;
7037
	/* Destroy deleted domains */
7039 for (i = 0; i < ndoms_cur; i++) {
7040 for (j = 0; j < n && !new_topology; j++) {
7041 if (cpumask_equal(doms_cur[i], doms_new[j])
7042 && dattrs_equal(dattr_cur, i, dattr_new, j))
7043 goto match1;
7044 }
7045
7046 detach_destroy_domains(doms_cur[i]);
7047match1:
7048 ;
7049 }
7050
7051 n = ndoms_cur;
7052 if (doms_new == NULL) {
7053 n = 0;
7054 doms_new = &fallback_doms;
7055 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
7056 WARN_ON_ONCE(dattr_new);
7057 }
7058
	/* Build new domains */
7060 for (i = 0; i < ndoms_new; i++) {
7061 for (j = 0; j < n && !new_topology; j++) {
7062 if (cpumask_equal(doms_new[i], doms_cur[j])
7063 && dattrs_equal(dattr_new, i, dattr_cur, j))
7064 goto match2;
7065 }
7066
7067 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
7068match2:
7069 ;
7070 }
7071
	/* Remember the new sched domains */
7073 if (doms_cur != &fallback_doms)
7074 free_sched_domains(doms_cur, ndoms_cur);
7075 kfree(dattr_cur);
7076 doms_cur = doms_new;
7077 dattr_cur = dattr_new;
7078 ndoms_cur = ndoms_new;
7079
7080 register_sched_domain_sysctl();
7081
7082 mutex_unlock(&sched_domains_mutex);
7083}
7084
7085static int num_cpus_frozen;
7086
/*
 * Update cpusets according to cpu_active mask. If cpusets are
 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
 * around partition_sched_domains().
 */
7095static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7096 void *hcpu)
7097{
7098 switch (action) {
7099 case CPU_ONLINE_FROZEN:
7100 case CPU_DOWN_FAILED_FROZEN:

		/*
		 * num_cpus_frozen tracks how many CPUs are involved in the
		 * suspend/resume sequence. As long as this is not the last
		 * online operation in the resume sequence, just build a
		 * single sched domain, ignoring cpusets.
		 */
7108 num_cpus_frozen--;
7109 if (likely(num_cpus_frozen)) {
7110 partition_sched_domains(1, NULL, NULL);
7111 break;
7112 }

		/*
		 * This is the last CPU online operation, so fall through and
		 * restore the original sched domains by considering the
		 * cpuset configurations.
		 */
7120 case CPU_ONLINE:
7121 cpuset_update_active_cpus(true);
7122 break;
7123 default:
7124 return NOTIFY_DONE;
7125 }
7126 return NOTIFY_OK;
7127}
7128
7129static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7130 void *hcpu)
7131{
7132 unsigned long flags;
7133 long cpu = (long)hcpu;
7134 struct dl_bw *dl_b;
7135 bool overflow;
7136 int cpus;
7137
7138 switch (action) {
7139 case CPU_DOWN_PREPARE:
7140 rcu_read_lock_sched();
7141 dl_b = dl_bw_of(cpu);
7142
7143 raw_spin_lock_irqsave(&dl_b->lock, flags);
7144 cpus = dl_bw_cpus(cpu);
7145 overflow = __dl_overflow(dl_b, cpus, 0, 0);
7146 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7147
7148 rcu_read_unlock_sched();
7149
7150 if (overflow)
7151 return notifier_from_errno(-EBUSY);
7152 cpuset_update_active_cpus(false);
7153 break;
7154 case CPU_DOWN_PREPARE_FROZEN:
7155 num_cpus_frozen++;
7156 partition_sched_domains(1, NULL, NULL);
7157 break;
7158 default:
7159 return NOTIFY_DONE;
7160 }
7161 return NOTIFY_OK;
7162}
7163
7164void __init sched_init_smp(void)
7165{
7166 cpumask_var_t non_isolated_cpus;
7167
7168 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
7169 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
7170
7171 sched_init_numa();

	/*
	 * There's no userspace yet to cause hotplug operations; hence all the
	 * cpu masks are stable and all blatant races in the below code cannot
	 * happen.
	 */
7178 mutex_lock(&sched_domains_mutex);
7179 init_sched_domains(cpu_active_mask);
7180 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7181 if (cpumask_empty(non_isolated_cpus))
7182 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
7183 mutex_unlock(&sched_domains_mutex);
7184
7185 hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
7186 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7187 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
7188
7189 init_hrtick();
7190
	/* Move init over to a non-isolated CPU */
7192 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
7193 BUG();
7194 sched_init_granularity();
7195 free_cpumask_var(non_isolated_cpus);
7196
7197 init_sched_rt_class();
7198 init_sched_dl_class();
7199}
7200#else
7201void __init sched_init_smp(void)
7202{
7203 sched_init_granularity();
7204}
7205#endif
7206
7207int in_sched_functions(unsigned long addr)
7208{
7209 return in_lock_functions(addr) ||
7210 (addr >= (unsigned long)__sched_text_start
7211 && addr < (unsigned long)__sched_text_end);
7212}
7213
7214#ifdef CONFIG_CGROUP_SCHED
7215
/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
7219struct task_group root_task_group;
7220LIST_HEAD(task_groups);
7221
/* Cache for allocation of struct task_group */
7223static struct kmem_cache *task_group_cache __read_mostly;
7224#endif
7225
7226DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
7227
7228void __init sched_init(void)
7229{
7230 int i, j;
7231 unsigned long alloc_size = 0, ptr;
7232
7233#ifdef CONFIG_FAIR_GROUP_SCHED
7234 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7235#endif
7236#ifdef CONFIG_RT_GROUP_SCHED
7237 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7238#endif
7239 if (alloc_size) {
7240 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
7241
7242#ifdef CONFIG_FAIR_GROUP_SCHED
7243 root_task_group.se = (struct sched_entity **)ptr;
7244 ptr += nr_cpu_ids * sizeof(void **);
7245
7246 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
7247 ptr += nr_cpu_ids * sizeof(void **);
7248
7249#endif
7250#ifdef CONFIG_RT_GROUP_SCHED
7251 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
7252 ptr += nr_cpu_ids * sizeof(void **);
7253
7254 root_task_group.rt_rq = (struct rt_rq **)ptr;
7255 ptr += nr_cpu_ids * sizeof(void **);
7256
7257#endif
7258 }
7259#ifdef CONFIG_CPUMASK_OFFSTACK
7260 for_each_possible_cpu(i) {
7261 per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
7262 cpumask_size(), GFP_KERNEL, cpu_to_node(i));
7263 }
7264#endif
7265
7266 init_rt_bandwidth(&def_rt_bandwidth,
7267 global_rt_period(), global_rt_runtime());
7268 init_dl_bandwidth(&def_dl_bandwidth,
7269 global_rt_period(), global_rt_runtime());
7270
7271#ifdef CONFIG_SMP
7272 init_defrootdomain();
7273#endif
7274
7275#ifdef CONFIG_RT_GROUP_SCHED
7276 init_rt_bandwidth(&root_task_group.rt_bandwidth,
7277 global_rt_period(), global_rt_runtime());
7278#endif
7279
7280#ifdef CONFIG_CGROUP_SCHED
7281 task_group_cache = KMEM_CACHE(task_group, 0);
7282
7283 list_add(&root_task_group.list, &task_groups);
7284 INIT_LIST_HEAD(&root_task_group.children);
7285 INIT_LIST_HEAD(&root_task_group.siblings);
7286 autogroup_init(&init_task);
7287#endif
7288
7289 for_each_possible_cpu(i) {
7290 struct rq *rq;
7291
7292 rq = cpu_rq(i);
7293 raw_spin_lock_init(&rq->lock);
7294 rq->nr_running = 0;
7295 rq->calc_load_active = 0;
7296 rq->calc_load_update = jiffies + LOAD_FREQ;
7297 init_cfs_rq(&rq->cfs);
7298 init_rt_rq(&rq->rt);
7299 init_dl_rq(&rq->dl);
7300#ifdef CONFIG_FAIR_GROUP_SCHED
7301 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
7302 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
		/*
		 * How much cpu bandwidth does root_task_group get?
		 *
		 * In case of task-groups formed through the cgroup filesystem,
		 * it gets 100% of the cpu resources in the system. This
		 * overall system cpu resource is divided among the tasks of
		 * root_task_group and its child task-groups in a fair manner,
		 * based on each entity's (task or task-group's) weight
		 * (se->load.weight).
		 *
		 * In other words, if root_task_group has 10 tasks of weight
		 * 1024 and two child groups A0 and A1 (of weight 1024 each),
		 * then A0's share of the cpu resource is:
		 *
		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) * 100
		 *
		 * We achieve this by letting root_task_group's tasks sit
		 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
		 */
7322 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
7323 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
7324#endif
7325
7326 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
7327#ifdef CONFIG_RT_GROUP_SCHED
7328 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
7329#endif
7330
7331 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7332 rq->cpu_load[j] = 0;
7333
7334 rq->last_load_update_tick = jiffies;
7335
7336#ifdef CONFIG_SMP
7337 rq->sd = NULL;
7338 rq->rd = NULL;
7339 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
7340 rq->balance_callback = NULL;
7341 rq->active_balance = 0;
7342 rq->next_balance = jiffies;
7343 rq->push_cpu = 0;
7344 rq->cpu = i;
7345 rq->online = 0;
7346 rq->idle_stamp = 0;
7347 rq->avg_idle = 2*sysctl_sched_migration_cost;
7348 rq->max_idle_balance_cost = sysctl_sched_migration_cost;
7349
7350 INIT_LIST_HEAD(&rq->cfs_tasks);
7351
7352 rq_attach_root(rq, &def_root_domain);
7353#ifdef CONFIG_NO_HZ_COMMON
7354 rq->nohz_flags = 0;
7355#endif
7356#ifdef CONFIG_NO_HZ_FULL
7357 rq->last_sched_tick = 0;
7358#endif
7359#endif
7360 init_rq_hrtick(rq);
7361 atomic_set(&rq->nr_iowait, 0);
7362 }
7363
7364 set_load_weight(&init_task);
7365
7366#ifdef CONFIG_PREEMPT_NOTIFIERS
7367 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7368#endif
7369
	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
7373 atomic_inc(&init_mm.mm_count);
7374 enter_lazy_tlb(&init_mm, current);
7375

	/*
	 * During early bootup we pretend to be a normal task:
	 */
7379 current->sched_class = &fair_sched_class;
7380
	/*
	 * Make us the idle thread. Technically, schedule() should not be
	 * called from this thread, however somewhere below it might be,
	 * but because we are the idle thread, we just pick up running again
	 * when this runqueue becomes "idle".
	 */
7387 init_idle(current, smp_processor_id());
7388
7389 calc_load_update = jiffies + LOAD_FREQ;
7390
7391#ifdef CONFIG_SMP
7392 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
7393
7394 if (cpu_isolated_map == NULL)
7395 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
7396 idle_thread_set_boot_cpu();
7397 set_cpu_rq_start_time();
7398#endif
7399 init_sched_fair_class();
7400
7401 scheduler_running = 1;
7402}
7403
7404#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
7405static inline int preempt_count_equals(int preempt_offset)
7406{
7407 int nested = preempt_count() + rcu_preempt_depth();
7408
7409 return (nested == preempt_offset);
7410}
7411
7412void __might_sleep(const char *file, int line, int preempt_offset)
7413{
	/*
	 * Blocking primitives will set (and therefore destroy) current->state;
	 * since we will exit with TASK_RUNNING, make sure we enter with it,
	 * otherwise we will destroy state.
	 */
7419 WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
7420 "do not call blocking ops when !TASK_RUNNING; "
7421 "state=%lx set at [<%p>] %pS\n",
7422 current->state,
7423 (void *)current->task_state_change,
7424 (void *)current->task_state_change);
7425
7426 ___might_sleep(file, line, preempt_offset);
7427}
7428EXPORT_SYMBOL(__might_sleep);
7429
7430void ___might_sleep(const char *file, int line, int preempt_offset)
7431{
7432 static unsigned long prev_jiffy;
7433
7434 rcu_sleep_check();
7435 if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
7436 !is_idle_task(current)) ||
7437 system_state != SYSTEM_RUNNING || oops_in_progress)
7438 return;
7439 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7440 return;
7441 prev_jiffy = jiffies;
7442
7443 printk(KERN_ERR
7444 "BUG: sleeping function called from invalid context at %s:%d\n",
7445 file, line);
7446 printk(KERN_ERR
7447 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7448 in_atomic(), irqs_disabled(),
7449 current->pid, current->comm);
7450
7451 if (task_stack_end_corrupted(current))
7452 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
7453
7454 debug_show_held_locks(current);
7455 if (irqs_disabled())
7456 print_irqtrace_events(current);
7457#ifdef CONFIG_DEBUG_PREEMPT
7458 if (!preempt_count_equals(preempt_offset)) {
7459 pr_err("Preemption disabled at:");
7460 print_ip_sym(current->preempt_disable_ip);
7461 pr_cont("\n");
7462 }
7463#endif
7464 dump_stack();
7465}
7466EXPORT_SYMBOL(___might_sleep);
7467#endif
7468
7469#ifdef CONFIG_MAGIC_SYSRQ
7470void normalize_rt_tasks(void)
7471{
7472 struct task_struct *g, *p;
7473 struct sched_attr attr = {
7474 .sched_policy = SCHED_NORMAL,
7475 };
7476
7477 read_lock(&tasklist_lock);
7478 for_each_process_thread(g, p) {
		/*
		 * Only normalize user tasks:
		 */
7482 if (p->flags & PF_KTHREAD)
7483 continue;
7484
7485 p->se.exec_start = 0;
7486#ifdef CONFIG_SCHEDSTATS
7487 p->se.statistics.wait_start = 0;
7488 p->se.statistics.sleep_start = 0;
7489 p->se.statistics.block_start = 0;
7490#endif
7491
7492 if (!dl_task(p) && !rt_task(p)) {
			/*
			 * Renice negative nice level userspace
			 * tasks back to 0:
			 */
7497 if (task_nice(p) < 0)
7498 set_user_nice(p, 0);
7499 continue;
7500 }
7501
7502 __sched_setscheduler(p, &attr, false, false);
7503 }
7504 read_unlock(&tasklist_lock);
7505}
7506
7507#endif
7508
7509#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)

/*
 * These functions are only useful for the IA64 MCA handling, or kdb.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 *
 * Return: The current task for @cpu.
 */
7528struct task_struct *curr_task(int cpu)
7529{
7530 return cpu_curr(cpu);
7531}
7532
7533#endif
7534
7535#ifdef CONFIG_IA64

/**
 * set_curr_task - set the current task for a given cpu.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see curr_task()
 * above) and restore that value before reenabling interrupts and returning.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
7551void set_curr_task(int cpu, struct task_struct *p)
7552{
7553 cpu_curr(cpu) = p;
7554}
7555
7556#endif
7557
7558#ifdef CONFIG_CGROUP_SCHED
7559
7560static DEFINE_SPINLOCK(task_group_lock);
7561
7562static void sched_free_group(struct task_group *tg)
7563{
7564 free_fair_sched_group(tg);
7565 free_rt_sched_group(tg);
7566 autogroup_free(tg);
7567 kmem_cache_free(task_group_cache, tg);
7568}
7569
/* Allocate runqueue etc. for a new task group */
7571struct task_group *sched_create_group(struct task_group *parent)
7572{
7573 struct task_group *tg;
7574
7575 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
7576 if (!tg)
7577 return ERR_PTR(-ENOMEM);
7578
7579 if (!alloc_fair_sched_group(tg, parent))
7580 goto err;
7581
7582 if (!alloc_rt_sched_group(tg, parent))
7583 goto err;
7584
7585 return tg;
7586
7587err:
7588 sched_free_group(tg);
7589 return ERR_PTR(-ENOMEM);
7590}
7591
7592void sched_online_group(struct task_group *tg, struct task_group *parent)
7593{
7594 unsigned long flags;
7595
7596 spin_lock_irqsave(&task_group_lock, flags);
7597 list_add_rcu(&tg->list, &task_groups);
7598
7599 WARN_ON(!parent);
7600
7601 tg->parent = parent;
7602 INIT_LIST_HEAD(&tg->children);
7603 list_add_rcu(&tg->siblings, &parent->children);
7604 spin_unlock_irqrestore(&task_group_lock, flags);
7605}
7606
/* rcu callback to free various structures associated with a task group */
7608static void sched_free_group_rcu(struct rcu_head *rhp)
7609{
	/* now it should be safe to free those cfs_rqs */
7611 sched_free_group(container_of(rhp, struct task_group, rcu));
7612}
7613
7614void sched_destroy_group(struct task_group *tg)
7615{
	/* wait for possible concurrent references to cfs_rqs to complete */
7617 call_rcu(&tg->rcu, sched_free_group_rcu);
7618}
7619
7620void sched_offline_group(struct task_group *tg)
7621{
7622 unsigned long flags;
7623
	/* end participation in shares distribution */
7625 unregister_fair_sched_group(tg);
7626
7627 spin_lock_irqsave(&task_group_lock, flags);
7628 list_del_rcu(&tg->list);
7629 list_del_rcu(&tg->siblings);
7630 spin_unlock_irqrestore(&task_group_lock, flags);
7631}
7632
/*
 * Change a task's runqueue when it moves between groups.
 * The caller of this function should have put the task in its new group
 * by now. This function just updates tsk->sched_task_group with the new
 * group.
 */
7638void sched_move_task(struct task_struct *tsk)
7639{
7640 struct task_group *tg;
7641 int queued, running;
7642 unsigned long flags;
7643 struct rq *rq;
7644
7645 rq = task_rq_lock(tsk, &flags);
7646
7647 running = task_current(rq, tsk);
7648 queued = task_on_rq_queued(tsk);
7649
7650 if (queued)
7651 dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
7652 if (unlikely(running))
7653 put_prev_task(rq, tsk);
7654
	/*
	 * All callers are synchronized by task_rq_lock(); we do not use RCU
	 * which is pointless here. Thus, we pass "true" to task_css_check()
	 * to prevent lockdep warnings.
	 */
7660 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
7661 struct task_group, css);
7662 tg = autogroup_task_group(tsk, tg);
7663 tsk->sched_task_group = tg;
7664
7665#ifdef CONFIG_FAIR_GROUP_SCHED
7666 if (tsk->sched_class->task_move_group)
7667 tsk->sched_class->task_move_group(tsk);
7668 else
7669#endif
7670 set_task_rq(tsk, task_cpu(tsk));
7671
7672 if (unlikely(running))
7673 tsk->sched_class->set_curr_task(rq);
7674 if (queued)
7675 enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);
7676
7677 task_rq_unlock(rq, tsk, &flags);
7678}
7679#endif
7680
7681#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
7685static DEFINE_MUTEX(rt_constraints_mutex);
7686
/* Must be called with tasklist_lock held */
7688static inline int tg_has_rt_tasks(struct task_group *tg)
7689{
7690 struct task_struct *g, *p;
7691
	/*
	 * Autogroups do not have RT tasks; see autogroup_create().
	 */
7695 if (task_group_is_autogroup(tg))
7696 return 0;
7697
7698 for_each_process_thread(g, p) {
7699 if (rt_task(p) && task_group(p) == tg)
7700 return 1;
7701 }
7702
7703 return 0;
7704}
7705
7706struct rt_schedulable_data {
7707 struct task_group *tg;
7708 u64 rt_period;
7709 u64 rt_runtime;
7710};
7711
7712static int tg_rt_schedulable(struct task_group *tg, void *data)
7713{
7714 struct rt_schedulable_data *d = data;
7715 struct task_group *child;
7716 unsigned long total, sum = 0;
7717 u64 period, runtime;
7718
7719 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7720 runtime = tg->rt_bandwidth.rt_runtime;
7721
7722 if (tg == d->tg) {
7723 period = d->rt_period;
7724 runtime = d->rt_runtime;
7725 }
7726
	/*
	 * Cannot have more runtime than the period.
	 */
7730 if (runtime > period && runtime != RUNTIME_INF)
7731 return -EINVAL;
7732
	/*
	 * Ensure we don't starve existing RT tasks.
	 */
7736 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7737 return -EBUSY;
7738
7739 total = to_ratio(period, runtime);
7740
	/*
	 * Nobody can have more than the global setting allows.
	 */
7744 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7745 return -EINVAL;
7746
	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
7750 list_for_each_entry_rcu(child, &tg->children, siblings) {
7751 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7752 runtime = child->rt_bandwidth.rt_runtime;
7753
7754 if (child == d->tg) {
7755 period = d->rt_period;
7756 runtime = d->rt_runtime;
7757 }
7758
7759 sum += to_ratio(period, runtime);
7760 }
7761
7762 if (sum > total)
7763 return -EINVAL;
7764
7765 return 0;
7766}
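
/*
 * Numeric sketch of the checks above: to_ratio() maps (period, runtime)
 * to a fixed-point utilization, so with the global defaults of
 * period = 1s and runtime = 0.95s the global cap corresponds to 95% of
 * a cpu. A group asking for e.g. runtime == period (ratio 1.0) would
 * exceed that cap and fail with -EINVAL, as would children whose
 * ratios sum to more than their parent's.
 */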
7767
7768static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
7769{
7770 int ret;
7771
7772 struct rt_schedulable_data data = {
7773 .tg = tg,
7774 .rt_period = period,
7775 .rt_runtime = runtime,
7776 };
7777
7778 rcu_read_lock();
7779 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7780 rcu_read_unlock();
7781
7782 return ret;
7783}
7784
7785static int tg_set_rt_bandwidth(struct task_group *tg,
7786 u64 rt_period, u64 rt_runtime)
7787{
7788 int i, err = 0;
7789
	/*
	 * Disallowing the root group RT runtime is BAD; it would disallow the
	 * kernel creating (and/or operating) RT threads.
	 */
7794 if (tg == &root_task_group && rt_runtime == 0)
7795 return -EINVAL;
7796
	/* No period doesn't make any sense. */
7798 if (rt_period == 0)
7799 return -EINVAL;
7800
7801 mutex_lock(&rt_constraints_mutex);
7802 read_lock(&tasklist_lock);
7803 err = __rt_schedulable(tg, rt_period, rt_runtime);
7804 if (err)
7805 goto unlock;
7806
7807 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7808 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7809 tg->rt_bandwidth.rt_runtime = rt_runtime;
7810
7811 for_each_possible_cpu(i) {
7812 struct rt_rq *rt_rq = tg->rt_rq[i];
7813
7814 raw_spin_lock(&rt_rq->rt_runtime_lock);
7815 rt_rq->rt_runtime = rt_runtime;
7816 raw_spin_unlock(&rt_rq->rt_runtime_lock);
7817 }
7818 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7819unlock:
7820 read_unlock(&tasklist_lock);
7821 mutex_unlock(&rt_constraints_mutex);
7822
7823 return err;
7824}
7825
7826static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7827{
7828 u64 rt_runtime, rt_period;
7829
7830 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7831 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7832 if (rt_runtime_us < 0)
7833 rt_runtime = RUNTIME_INF;
7834
7835 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7836}
7837
7838static long sched_group_rt_runtime(struct task_group *tg)
7839{
7840 u64 rt_runtime_us;
7841
7842 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
7843 return -1;
7844
7845 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
7846 do_div(rt_runtime_us, NSEC_PER_USEC);
7847 return rt_runtime_us;
7848}
7849
7850static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
7851{
7852 u64 rt_runtime, rt_period;
7853
7854 rt_period = rt_period_us * NSEC_PER_USEC;
7855 rt_runtime = tg->rt_bandwidth.rt_runtime;
7856
7857 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7858}
7859
7860static long sched_group_rt_period(struct task_group *tg)
7861{
7862 u64 rt_period_us;
7863
7864 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7865 do_div(rt_period_us, NSEC_PER_USEC);
7866 return rt_period_us;
7867}
7868#endif
7869
7870#ifdef CONFIG_RT_GROUP_SCHED
7871static int sched_rt_global_constraints(void)
7872{
7873 int ret = 0;
7874
7875 mutex_lock(&rt_constraints_mutex);
7876 read_lock(&tasklist_lock);
7877 ret = __rt_schedulable(NULL, 0, 0);
7878 read_unlock(&tasklist_lock);
7879 mutex_unlock(&rt_constraints_mutex);
7880
7881 return ret;
7882}
7883
7884static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7885{
	/* Don't accept realtime tasks when there is no way for them to run */
7887 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7888 return 0;
7889
7890 return 1;
7891}
7892
7893#else
7894static int sched_rt_global_constraints(void)
7895{
7896 unsigned long flags;
7897 int i, ret = 0;
7898
7899 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
7900 for_each_possible_cpu(i) {
7901 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7902
7903 raw_spin_lock(&rt_rq->rt_runtime_lock);
7904 rt_rq->rt_runtime = global_rt_runtime();
7905 raw_spin_unlock(&rt_rq->rt_runtime_lock);
7906 }
7907 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
7908
7909 return ret;
7910}
7911#endif
7912
7913static int sched_dl_global_validate(void)
7914{
7915 u64 runtime = global_rt_runtime();
7916 u64 period = global_rt_period();
7917 u64 new_bw = to_ratio(period, runtime);
7918 struct dl_bw *dl_b;
7919 int cpu, ret = 0;
7920 unsigned long flags;
7921
	/*
	 * Here we want to check that the bandwidth is not being set to a
	 * value smaller than the currently allocated bandwidth in
	 * any of the root_domains.
	 *
	 * FIXME: Cycling on all the CPUs is overdoing it, but simpler than
	 * cycling on root_domains... Discussion on different/better
	 * solutions is welcome!
	 */
7931 for_each_possible_cpu(cpu) {
7932 rcu_read_lock_sched();
7933 dl_b = dl_bw_of(cpu);
7934
7935 raw_spin_lock_irqsave(&dl_b->lock, flags);
7936 if (new_bw < dl_b->total_bw)
7937 ret = -EBUSY;
7938 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7939
7940 rcu_read_unlock_sched();
7941
7942 if (ret)
7943 break;
7944 }
7945
7946 return ret;
7947}
7948
7949static void sched_dl_do_global(void)
7950{
7951 u64 new_bw = -1;
7952 struct dl_bw *dl_b;
7953 int cpu;
7954 unsigned long flags;
7955
7956 def_dl_bandwidth.dl_period = global_rt_period();
7957 def_dl_bandwidth.dl_runtime = global_rt_runtime();
7958
7959 if (global_rt_runtime() != RUNTIME_INF)
7960 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
7961

	/*
	 * As above, cycle on all CPUs to push the new bandwidth into every
	 * root_domain (visiting each root_domain once would suffice).
	 */
7965 for_each_possible_cpu(cpu) {
7966 rcu_read_lock_sched();
7967 dl_b = dl_bw_of(cpu);
7968
7969 raw_spin_lock_irqsave(&dl_b->lock, flags);
7970 dl_b->bw = new_bw;
7971 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7972
7973 rcu_read_unlock_sched();
7974 }
7975}
7976
7977static int sched_rt_global_validate(void)
7978{
7979 if (sysctl_sched_rt_period <= 0)
7980 return -EINVAL;
7981
7982 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
7983 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
7984 return -EINVAL;
7985
7986 return 0;
7987}
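
/*
 * e.g. the defaults sysctl_sched_rt_period = 1000000us and
 * sysctl_sched_rt_runtime = 950000us pass this validation: RT tasks may
 * then consume at most 95% of each second, leaving 5% for normal tasks.
 */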
7988
7989static void sched_rt_do_global(void)
7990{
7991 def_rt_bandwidth.rt_runtime = global_rt_runtime();
7992 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
7993}
7994
7995int sched_rt_handler(struct ctl_table *table, int write,
7996 void __user *buffer, size_t *lenp,
7997 loff_t *ppos)
7998{
7999 int old_period, old_runtime;
8000 static DEFINE_MUTEX(mutex);
8001 int ret;
8002
8003 mutex_lock(&mutex);
8004 old_period = sysctl_sched_rt_period;
8005 old_runtime = sysctl_sched_rt_runtime;
8006
8007 ret = proc_dointvec(table, write, buffer, lenp, ppos);
8008
8009 if (!ret && write) {
8010 ret = sched_rt_global_validate();
8011 if (ret)
8012 goto undo;
8013
8014 ret = sched_dl_global_validate();
8015 if (ret)
8016 goto undo;
8017
8018 ret = sched_rt_global_constraints();
8019 if (ret)
8020 goto undo;
8021
8022 sched_rt_do_global();
8023 sched_dl_do_global();
8024 }
8025 if (0) {
8026undo:
8027 sysctl_sched_rt_period = old_period;
8028 sysctl_sched_rt_runtime = old_runtime;
8029 }
8030 mutex_unlock(&mutex);
8031
8032 return ret;
8033}
8034
8035int sched_rr_handler(struct ctl_table *table, int write,
8036 void __user *buffer, size_t *lenp,
8037 loff_t *ppos)
8038{
8039 int ret;
8040 static DEFINE_MUTEX(mutex);
8041
8042 mutex_lock(&mutex);
8043 ret = proc_dointvec(table, write, buffer, lenp, ppos);
8044
	/*
	 * Make sure that internally we keep jiffies; also, writing zero
	 * resets the timeslice to the default.
	 */
8046 if (!ret && write) {
8047 sched_rr_timeslice = sched_rr_timeslice <= 0 ?
8048 RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
8049 }
8050 mutex_unlock(&mutex);
8051 return ret;
8052}
8053
8054#ifdef CONFIG_CGROUP_SCHED
8055
8056static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
8057{
8058 return css ? container_of(css, struct task_group, css) : NULL;
8059}
8060
8061static struct cgroup_subsys_state *
8062cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8063{
8064 struct task_group *parent = css_tg(parent_css);
8065 struct task_group *tg;
8066
8067 if (!parent) {
		/* This is early initialization for the top cgroup */
8069 return &root_task_group.css;
8070 }
8071
8072 tg = sched_create_group(parent);
8073 if (IS_ERR(tg))
8074 return ERR_PTR(-ENOMEM);
8075
8076 sched_online_group(tg, parent);
8077
8078 return &tg->css;
8079}
8080
8081static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
8082{
8083 struct task_group *tg = css_tg(css);
8084
8085 sched_offline_group(tg);
8086}
8087
8088static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
8089{
8090 struct task_group *tg = css_tg(css);
8091
	/*
	 * Relies on the RCU grace period between css_released() and this.
	 */
8095 sched_free_group(tg);
8096}
8097
8098static void cpu_cgroup_fork(struct task_struct *task)
8099{
8100 sched_move_task(task);
8101}
8102
8103static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
8104{
8105 struct task_struct *task;
8106 struct cgroup_subsys_state *css;
8107
8108 cgroup_taskset_for_each(task, css, tset) {
8109#ifdef CONFIG_RT_GROUP_SCHED
8110 if (!sched_rt_can_attach(css_tg(css), task))
8111 return -EINVAL;
8112#else
		/* We don't support RT-tasks being in separate groups */
8114 if (task->sched_class != &fair_sched_class)
8115 return -EINVAL;
8116#endif
8117 }
8118 return 0;
8119}
8120
8121static void cpu_cgroup_attach(struct cgroup_taskset *tset)
8122{
8123 struct task_struct *task;
8124 struct cgroup_subsys_state *css;
8125
8126 cgroup_taskset_for_each(task, css, tset)
8127 sched_move_task(task);
8128}
8129
8130#ifdef CONFIG_FAIR_GROUP_SCHED
8131static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8132 struct cftype *cftype, u64 shareval)
8133{
8134 return sched_group_set_shares(css_tg(css), scale_load(shareval));
8135}
8136
8137static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
8138 struct cftype *cft)
8139{
8140 struct task_group *tg = css_tg(css);
8141
8142 return (u64) scale_load_down(tg->shares);
8143}
8144
8145#ifdef CONFIG_CFS_BANDWIDTH
8146static DEFINE_MUTEX(cfs_constraints_mutex);
8147
8148const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC;
8149const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC;
8150
8151static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
8152
8153static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
8154{
8155 int i, ret = 0, runtime_enabled, runtime_was_enabled;
8156 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8157
8158 if (tg == &root_task_group)
8159 return -EINVAL;
8160
	/*
	 * Ensure we have at least some amount of bandwidth every period.
	 * This is to prevent reaching a state of large arrears when
	 * throttled via cfs_rq->runtime_remaining (likewise for the period).
	 */
8166 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
8167 return -EINVAL;
8168
	/*
	 * Likewise, bound things on the other side by preventing insane
	 * quota periods. This also allows us to normalize in computing
	 * quota feasibility.
	 */
8174 if (period > max_cfs_quota_period)
8175 return -EINVAL;
8176
	/*
	 * Prevent a race between setting of cfs_rq->runtime_enabled and
	 * unthrottle_offline_cfs_rqs().
	 */
8181 get_online_cpus();
8182 mutex_lock(&cfs_constraints_mutex);
8183 ret = __cfs_schedulable(tg, period, quota);
8184 if (ret)
8185 goto out_unlock;
8186
8187 runtime_enabled = quota != RUNTIME_INF;
8188 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
8189
	/*
	 * If we need to toggle cfs_bandwidth_used, off->on must occur
	 * before making related changes, and on->off must occur afterwards.
	 */
8193 if (runtime_enabled && !runtime_was_enabled)
8194 cfs_bandwidth_usage_inc();
8195 raw_spin_lock_irq(&cfs_b->lock);
8196 cfs_b->period = ns_to_ktime(period);
8197 cfs_b->quota = quota;
8198
8199 __refill_cfs_bandwidth_runtime(cfs_b);
	/* Restart the period timer (if active) to handle new period expiry. */
8201 if (runtime_enabled)
8202 start_cfs_bandwidth(cfs_b);
8203 raw_spin_unlock_irq(&cfs_b->lock);
8204
8205 for_each_online_cpu(i) {
8206 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
8207 struct rq *rq = cfs_rq->rq;
8208
8209 raw_spin_lock_irq(&rq->lock);
8210 cfs_rq->runtime_enabled = runtime_enabled;
8211 cfs_rq->runtime_remaining = 0;
8212
8213 if (cfs_rq->throttled)
8214 unthrottle_cfs_rq(cfs_rq);
8215 raw_spin_unlock_irq(&rq->lock);
8216 }
8217 if (runtime_was_enabled && !runtime_enabled)
8218 cfs_bandwidth_usage_dec();
8219out_unlock:
8220 mutex_unlock(&cfs_constraints_mutex);
8221 put_online_cpus();
8222
8223 return ret;
8224}
8225
8226int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8227{
8228 u64 quota, period;
8229
8230 period = ktime_to_ns(tg->cfs_bandwidth.period);
8231 if (cfs_quota_us < 0)
8232 quota = RUNTIME_INF;
8233 else
8234 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8235
8236 return tg_set_cfs_bandwidth(tg, period, quota);
8237}
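
/*
 * Usage sketch (values as written to the cgroup file cpu.cfs_quota_us):
 * with the default 100000us period, writing 50000 caps the group at
 * half a cpu, 200000 allows two cpus worth of runtime per period, and
 * any negative value maps to RUNTIME_INF, i.e. no limit.
 */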
8238
8239long tg_get_cfs_quota(struct task_group *tg)
8240{
8241 u64 quota_us;
8242
8243 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
8244 return -1;
8245
8246 quota_us = tg->cfs_bandwidth.quota;
8247 do_div(quota_us, NSEC_PER_USEC);
8248
8249 return quota_us;
8250}
8251
8252int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8253{
8254 u64 quota, period;
8255
8256 period = (u64)cfs_period_us * NSEC_PER_USEC;
8257 quota = tg->cfs_bandwidth.quota;
8258
8259 return tg_set_cfs_bandwidth(tg, period, quota);
8260}
8261
8262long tg_get_cfs_period(struct task_group *tg)
8263{
8264 u64 cfs_period_us;
8265
8266 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
8267 do_div(cfs_period_us, NSEC_PER_USEC);
8268
8269 return cfs_period_us;
8270}
8271
8272static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
8273 struct cftype *cft)
8274{
8275 return tg_get_cfs_quota(css_tg(css));
8276}
8277
8278static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
8279 struct cftype *cftype, s64 cfs_quota_us)
8280{
8281 return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
8282}
8283
8284static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
8285 struct cftype *cft)
8286{
8287 return tg_get_cfs_period(css_tg(css));
8288}
8289
8290static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
8291 struct cftype *cftype, u64 cfs_period_us)
8292{
8293 return tg_set_cfs_period(css_tg(css), cfs_period_us);
8294}
8295
8296struct cfs_schedulable_data {
8297 struct task_group *tg;
8298 u64 period, quota;
8299};
8300
/*
 * Normalize group quota/period to be quota/max_period, so that quotas
 * with different periods can be compared up and down the task_group tree.
 */
8305static u64 normalize_cfs_quota(struct task_group *tg,
8306 struct cfs_schedulable_data *d)
8307{
8308 u64 quota, period;
8309
8310 if (tg == d->tg) {
8311 period = d->period;
8312 quota = d->quota;
8313 } else {
8314 period = tg_get_cfs_period(tg);
8315 quota = tg_get_cfs_quota(tg);
8316 }
8317
	/* note: these should typically be equivalent */
8319 if (quota == RUNTIME_INF || quota == -1)
8320 return RUNTIME_INF;
8321
8322 return to_ratio(period, quota);
8323}
8324
8325static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8326{
8327 struct cfs_schedulable_data *d = data;
8328 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8329 s64 quota = 0, parent_quota = -1;
8330
8331 if (!tg->parent) {
8332 quota = RUNTIME_INF;
8333 } else {
8334 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
8335
8336 quota = normalize_cfs_quota(tg, d);
8337 parent_quota = parent_b->hierarchical_quota;
8338
		/*
		 * Ensure max(child_quota) <= parent_quota, inheriting when
		 * no limit is set.
		 */
8343 if (quota == RUNTIME_INF)
8344 quota = parent_quota;
8345 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8346 return -EINVAL;
8347 }
8348 cfs_b->hierarchical_quota = quota;
8349
8350 return 0;
8351}
8352
8353static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
8354{
8355 int ret;
8356 struct cfs_schedulable_data data = {
8357 .tg = tg,
8358 .period = period,
8359 .quota = quota,
8360 };
8361
8362 if (quota != RUNTIME_INF) {
8363 do_div(data.period, NSEC_PER_USEC);
8364 do_div(data.quota, NSEC_PER_USEC);
8365 }
8366
8367 rcu_read_lock();
8368 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
8369 rcu_read_unlock();
8370
8371 return ret;
8372}
8373
8374static int cpu_stats_show(struct seq_file *sf, void *v)
8375{
8376 struct task_group *tg = css_tg(seq_css(sf));
8377 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
8378
8379 seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
8380 seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
8381 seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
8382
8383 return 0;
8384}
8385#endif
8386#endif
8387
8388#ifdef CONFIG_RT_GROUP_SCHED
8389static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
8390 struct cftype *cft, s64 val)
8391{
8392 return sched_group_set_rt_runtime(css_tg(css), val);
8393}
8394
8395static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
8396 struct cftype *cft)
8397{
8398 return sched_group_rt_runtime(css_tg(css));
8399}
8400
8401static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
8402 struct cftype *cftype, u64 rt_period_us)
8403{
8404 return sched_group_set_rt_period(css_tg(css), rt_period_us);
8405}
8406
8407static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
8408 struct cftype *cft)
8409{
8410 return sched_group_rt_period(css_tg(css));
8411}
8412#endif
8413
8414static struct cftype cpu_files[] = {
8415#ifdef CONFIG_FAIR_GROUP_SCHED
8416 {
8417 .name = "shares",
8418 .read_u64 = cpu_shares_read_u64,
8419 .write_u64 = cpu_shares_write_u64,
8420 },
8421#endif
8422#ifdef CONFIG_CFS_BANDWIDTH
8423 {
8424 .name = "cfs_quota_us",
8425 .read_s64 = cpu_cfs_quota_read_s64,
8426 .write_s64 = cpu_cfs_quota_write_s64,
8427 },
8428 {
8429 .name = "cfs_period_us",
8430 .read_u64 = cpu_cfs_period_read_u64,
8431 .write_u64 = cpu_cfs_period_write_u64,
8432 },
8433 {
8434 .name = "stat",
8435 .seq_show = cpu_stats_show,
8436 },
8437#endif
8438#ifdef CONFIG_RT_GROUP_SCHED
8439 {
8440 .name = "rt_runtime_us",
8441 .read_s64 = cpu_rt_runtime_read,
8442 .write_s64 = cpu_rt_runtime_write,
8443 },
8444 {
8445 .name = "rt_period_us",
8446 .read_u64 = cpu_rt_period_read_uint,
8447 .write_u64 = cpu_rt_period_write_uint,
8448 },
8449#endif
8450 { }
8451};
8452
8453struct cgroup_subsys cpu_cgrp_subsys = {
8454 .css_alloc = cpu_cgroup_css_alloc,
8455 .css_released = cpu_cgroup_css_released,
8456 .css_free = cpu_cgroup_css_free,
8457 .fork = cpu_cgroup_fork,
8458 .can_attach = cpu_cgroup_can_attach,
8459 .attach = cpu_cgroup_attach,
8460 .legacy_cftypes = cpu_files,
8461 .early_init = true,
8462};
8463
8464#endif
8465
8466void dump_cpu_task(int cpu)
8467{
8468 pr_info("Task dump for CPU %d:\n", cpu);
8469 sched_show_task(cpu_curr(cpu));
8470}
8471
/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage; if you go down 1 level,
 * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10%, then
 * the relative distance between them is ~25%.)
 */
8484const int sched_prio_to_weight[40] = {
8485 88761, 71755, 56483, 46273, 36291,
8486 29154, 23254, 18705, 14949, 11916,
8487 9548, 7620, 6100, 4904, 3906,
8488 3121, 2501, 1991, 1586, 1277,
8489 1024, 820, 655, 526, 423,
8490 335, 272, 215, 172, 137,
8491 110, 87, 70, 56, 45,
8492 36, 29, 23, 18, 15,
8493};
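
/*
 * Quick check of the ~1.25 step in the table above:
 * weight(nice 0) / weight(nice 1) = 1024 / 820 ~= 1.25 and
 * weight(nice -1) / weight(nice 0) = 1277 / 1024 ~= 1.25, so one nice
 * step shifts relative CPU time by about 10% in either direction when
 * two such tasks compete.
 */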
8494
/*
 * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
8502const u32 sched_prio_to_wmult[40] = {
8503 48388, 59856, 76040, 92818, 118348,
8504 147320, 184698, 229616, 287308, 360437,
8505 449829, 563644, 704093, 875809, 1099582,
8506 1376151, 1717300, 2157191, 2708050, 3363326,
8507 4194304, 5237765, 6557202, 8165337, 10153587,
8508 12820798, 15790321, 19976592, 24970740, 31350126,
8509 39045157, 49367440, 61356676, 76695844, 95443717,
8510 119304647, 148102320, 186737708, 238609294, 286331153,
8511};
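
/*
 * Example of the precalculated inverse: for nice 0 (index 20),
 * 2^32 / 1024 == 4194304 == sched_prio_to_wmult[20], so dividing by the
 * weight becomes a multiply by the inverse followed by a 32-bit shift.
 */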
8512