/*
 * Scheduler topology setup/handling methods
 */
5#include <linux/sched.h>
6#include <linux/mutex.h>
7#include <linux/sched/isolation.h>
8
9#include "sched.h"
10
11DEFINE_MUTEX(sched_domains_mutex);
12
/* Protected by sched_domains_mutex: */
14cpumask_var_t sched_domains_tmpmask;
15cpumask_var_t sched_domains_tmpmask2;
16
17#ifdef CONFIG_SCHED_DEBUG

static __read_mostly int sched_debug_enabled;

19static int __init sched_debug_setup(char *str)
20{
21 sched_debug_enabled = true;
22
23 return 0;
24}
25early_param("sched_debug", sched_debug_setup);
26
27static inline bool sched_debug(void)
28{
29 return sched_debug_enabled;
30}
31
32static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
33 struct cpumask *groupmask)
34{
35 struct sched_group *group = sd->groups;
36
37 cpumask_clear(groupmask);
38
39 printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
40
41 if (!(sd->flags & SD_LOAD_BALANCE)) {
42 printk("does not load-balance\n");
43 if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
46 return -1;
47 }
48
49 printk(KERN_CONT "span=%*pbl level=%s\n",
50 cpumask_pr_args(sched_domain_span(sd)), sd->name);
51
	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}
60
61 printk(KERN_DEBUG "%*s groups:", level + 1, "");
62 do {
63 if (!group) {
64 printk("\n");
65 printk(KERN_ERR "ERROR: group is NULL\n");
66 break;
67 }
68
69 if (!cpumask_weight(sched_group_span(group))) {
70 printk(KERN_CONT "\n");
71 printk(KERN_ERR "ERROR: empty group\n");
72 break;
73 }
74
75 if (!(sd->flags & SD_OVERLAP) &&
76 cpumask_intersects(groupmask, sched_group_span(group))) {
77 printk(KERN_CONT "\n");
78 printk(KERN_ERR "ERROR: repeated CPUs\n");
79 break;
80 }
81
82 cpumask_or(groupmask, groupmask, sched_group_span(group));
83
84 printk(KERN_CONT " %d:{ span=%*pbl",
85 group->sgc->id,
86 cpumask_pr_args(sched_group_span(group)));
87
88 if ((sd->flags & SD_OVERLAP) &&
89 !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
90 printk(KERN_CONT " mask=%*pbl",
91 cpumask_pr_args(group_balance_mask(group)));
92 }
93
94 if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
95 printk(KERN_CONT " cap=%lu", group->sgc->capacity);
96
97 if (group == sd->groups && sd->child &&
98 !cpumask_equal(sched_domain_span(sd->child),
99 sched_group_span(group))) {
100 printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
101 }
102
103 printk(KERN_CONT " }");
104
105 group = group->next;
106
107 if (group != sd->groups)
108 printk(KERN_CONT ",");
109
110 } while (group != sd->groups);
111 printk(KERN_CONT "\n");
112
113 if (!cpumask_equal(sched_domain_span(sd), groupmask))
114 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
115
116 if (sd->parent &&
117 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
120 return 0;
121}
122
123static void sched_domain_debug(struct sched_domain *sd, int cpu)
124{
125 int level = 0;
126
127 if (!sched_debug_enabled)
128 return;
129
130 if (!sd) {
131 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
132 return;
133 }
134
135 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
136
137 for (;;) {
138 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
139 break;
140 level++;
141 sd = sd->parent;
142 if (!sd)
143 break;
144 }
145}
#else /* !CONFIG_SCHED_DEBUG */
147
148# define sched_debug_enabled 0
149# define sched_domain_debug(sd, cpu) do { } while (0)
150static inline bool sched_debug(void)
151{
152 return false;
153}
#endif /* CONFIG_SCHED_DEBUG */
155
156static int sd_degenerate(struct sched_domain *sd)
157{
158 if (cpumask_weight(sched_domain_span(sd)) == 1)
159 return 1;
160
	/* Following flags need at least 2 groups: */
162 if (sd->flags & (SD_LOAD_BALANCE |
163 SD_BALANCE_NEWIDLE |
164 SD_BALANCE_FORK |
165 SD_BALANCE_EXEC |
166 SD_SHARE_CPUCAPACITY |
167 SD_ASYM_CPUCAPACITY |
168 SD_SHARE_PKG_RESOURCES |
169 SD_SHARE_POWERDOMAIN)) {
170 if (sd->groups != sd->groups->next)
171 return 0;
172 }
173
	/* Following flags don't use groups: */
175 if (sd->flags & (SD_WAKE_AFFINE))
176 return 0;
177
178 return 1;
179}
180
181static int
182sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
183{
184 unsigned long cflags = sd->flags, pflags = parent->flags;
185
186 if (sd_degenerate(parent))
187 return 1;
188
189 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
190 return 0;
191
	/* Flags needing groups don't count if only 1 group in parent: */
193 if (parent->groups == parent->groups->next) {
194 pflags &= ~(SD_LOAD_BALANCE |
195 SD_BALANCE_NEWIDLE |
196 SD_BALANCE_FORK |
197 SD_BALANCE_EXEC |
198 SD_ASYM_CPUCAPACITY |
199 SD_SHARE_CPUCAPACITY |
200 SD_SHARE_PKG_RESOURCES |
201 SD_PREFER_SIBLING |
202 SD_SHARE_POWERDOMAIN);
203 if (nr_node_ids == 1)
204 pflags &= ~SD_SERIALIZE;
205 }
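	/* Flags the parent has but the child lacks keep the parent relevant: */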
206 if (~cflags & pflags)
207 return 0;
208
209 return 1;
210}
211
212static void free_rootdomain(struct rcu_head *rcu)
213{
214 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
215
216 cpupri_cleanup(&rd->cpupri);
217 cpudl_cleanup(&rd->cpudl);
218 free_cpumask_var(rd->dlo_mask);
219 free_cpumask_var(rd->rto_mask);
220 free_cpumask_var(rd->online);
221 free_cpumask_var(rd->span);
222 kfree(rd);
223}
224
225void rq_attach_root(struct rq *rq, struct root_domain *rd)
226{
227 struct root_domain *old_rd = NULL;
228 unsigned long flags;
229
230 raw_spin_lock_irqsave(&rq->lock, flags);
231
232 if (rq->rd) {
233 old_rd = rq->rd;
234
235 if (cpumask_test_cpu(rq->cpu, old_rd->online))
236 set_rq_offline(rq);
237
238 cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
245 if (!atomic_dec_and_test(&old_rd->refcount))
246 old_rd = NULL;
247 }
248
249 atomic_inc(&rd->refcount);
250 rq->rd = rd;
251
252 cpumask_set_cpu(rq->cpu, rd->span);
253 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
254 set_rq_online(rq);
255
256 raw_spin_unlock_irqrestore(&rq->lock, flags);
257
258 if (old_rd)
259 call_rcu_sched(&old_rd->rcu, free_rootdomain);
260}
261
262static int init_rootdomain(struct root_domain *rd)
263{
264 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
265 goto out;
266 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
267 goto free_span;
268 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
269 goto free_online;
270 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
271 goto free_dlo_mask;
272
273#ifdef HAVE_RT_PUSH_IPI
274 rd->rto_cpu = -1;
275 raw_spin_lock_init(&rd->rto_lock);
276 init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
277#endif
278
279 init_dl_bw(&rd->dl_bw);
280 if (cpudl_init(&rd->cpudl) != 0)
281 goto free_rto_mask;
282
283 if (cpupri_init(&rd->cpupri) != 0)
284 goto free_cpudl;
285 return 0;
286
287free_cpudl:
288 cpudl_cleanup(&rd->cpudl);
289free_rto_mask:
290 free_cpumask_var(rd->rto_mask);
291free_dlo_mask:
292 free_cpumask_var(rd->dlo_mask);
293free_online:
294 free_cpumask_var(rd->online);
295free_span:
296 free_cpumask_var(rd->span);
297out:
298 return -ENOMEM;
299}
300
/*
 * By default the system creates a single root-domain with all available
 * CPUs as members (mimicking the global state we have today).
 */
305struct root_domain def_root_domain;
306
307void init_defrootdomain(void)
308{
309 init_rootdomain(&def_root_domain);
310
311 atomic_set(&def_root_domain.refcount, 1);
312}
313
314static struct root_domain *alloc_rootdomain(void)
315{
316 struct root_domain *rd;
317
318 rd = kzalloc(sizeof(*rd), GFP_KERNEL);
319 if (!rd)
320 return NULL;
321
322 if (init_rootdomain(rd) != 0) {
323 kfree(rd);
324 return NULL;
325 }
326
327 return rd;
328}
329
330static void free_sched_groups(struct sched_group *sg, int free_sgc)
331{
332 struct sched_group *tmp, *first;
333
334 if (!sg)
335 return;
336
337 first = sg;
338 do {
339 tmp = sg->next;
340
341 if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
342 kfree(sg->sgc);
343
344 if (atomic_dec_and_test(&sg->ref))
345 kfree(sg);
346 sg = tmp;
347 } while (sg != first);
348}
349
350static void destroy_sched_domain(struct sched_domain *sd)
351{
	/*
	 * A normal sched domain may have multiple group references, but
	 * an overlapping domain, having private groups, only one.  Iterate
	 * the whole list though, so that we don't miss any.
	 */
357 free_sched_groups(sd->groups, 1);
358
359 if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
360 kfree(sd->shared);
361 kfree(sd);
362}
363
364static void destroy_sched_domains_rcu(struct rcu_head *rcu)
365{
366 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
367
368 while (sd) {
369 struct sched_domain *parent = sd->parent;
370 destroy_sched_domain(sd);
371 sd = parent;
372 }
373}
374
375static void destroy_sched_domains(struct sched_domain *sd)
376{
377 if (sd)
378 call_rcu(&sd->rcu, destroy_sched_domains_rcu);
379}
380
/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain) for this
 * allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in
 * the cpumask of the domain), this allows us to quickly tell if
 * two CPUs are in the same cache domain, see cpus_share_cache().
 */
390DEFINE_PER_CPU(struct sched_domain *, sd_llc);
391DEFINE_PER_CPU(int, sd_llc_size);
392DEFINE_PER_CPU(int, sd_llc_id);
393DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
394DEFINE_PER_CPU(struct sched_domain *, sd_numa);
395DEFINE_PER_CPU(struct sched_domain *, sd_asym);
396
397static void update_top_cache_domain(int cpu)
398{
399 struct sched_domain_shared *sds = NULL;
400 struct sched_domain *sd;
401 int id = cpu;
402 int size = 1;
403
404 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
405 if (sd) {
406 id = cpumask_first(sched_domain_span(sd));
407 size = cpumask_weight(sched_domain_span(sd));
408 sds = sd->shared;
409 }
410
411 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
412 per_cpu(sd_llc_size, cpu) = size;
413 per_cpu(sd_llc_id, cpu) = id;
414 rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
415
416 sd = lowest_flag_domain(cpu, SD_NUMA);
417 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
418
419 sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
420 rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
421}
422
/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
427static void
428cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
429{
430 struct rq *rq = cpu_rq(cpu);
431 struct sched_domain *tmp;
432
	/* Remove the sched domains which do not contribute to scheduling: */
434 for (tmp = sd; tmp; ) {
435 struct sched_domain *parent = tmp->parent;
436 if (!parent)
437 break;
438
439 if (sd_parent_degenerate(tmp, parent)) {
440 tmp->parent = parent->parent;
441 if (parent->parent)
442 parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
448 if (parent->flags & SD_PREFER_SIBLING)
449 tmp->flags |= SD_PREFER_SIBLING;
450 destroy_sched_domain(parent);
451 } else
452 tmp = tmp->parent;
453 }
454
455 if (sd && sd_degenerate(sd)) {
456 tmp = sd;
457 sd = sd->parent;
458 destroy_sched_domain(tmp);
459 if (sd)
460 sd->child = NULL;
461 }
462
463 sched_domain_debug(sd, cpu);
464
465 rq_attach_root(rq, rd);
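	/* Publish the new domain hierarchy; the old one is freed via RCU below: */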
466 tmp = rq->sd;
467 rcu_assign_pointer(rq->sd, sd);
468 dirty_sched_domain_sysctl(cpu);
469 destroy_sched_domains(tmp);
470
471 update_top_cache_domain(cpu);
472}
473
474struct s_data {
475 struct sched_domain ** __percpu sd;
476 struct root_domain *rd;
477};
478
479enum s_alloc {
480 sa_rootdomain,
481 sa_sd,
482 sa_sd_storage,
483 sa_none,
484};
485
/*
 * Return the canonical balance CPU for this group, this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask are all those CPUs that could actually end up at this
 * group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
495int group_balance_cpu(struct sched_group *sg)
496{
497 return cpumask_first(group_balance_mask(sg));
498}
499
500
/*
 * NUMA levels are built with SDTL_OVERLAP/SD_OVERLAP set.  At such a level
 * each CPU gets its own sched_domain span (the set of nodes within a given
 * distance of that CPU), so the spans of different CPUs differ and the
 * resulting sched_groups overlap.
 *
 * build_overlap_sched_groups() constructs each group from a sibling CPU's
 * child domain span (see build_group_from_child_sched_domain()).  Because of
 * the overlap, a group can contain CPUs whose own domain at this level looks
 * different, and only the CPUs for which the group's span equals the span of
 * their child domain can meaningfully balance on behalf of the group.
 *
 * build_balance_mask() computes exactly that set of CPUs, group_balance_cpu()
 * picks the first of them, and the group's sched_group_capacity (which is
 * shared by all groups built over the same span) records the mask.
 */
606static void
607build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
608{
609 const struct cpumask *sg_span = sched_group_span(sg);
610 struct sd_data *sdd = sd->private;
611 struct sched_domain *sibling;
612 int i;
613
614 cpumask_clear(mask);
615
616 for_each_cpu(i, sg_span) {
617 sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain _should_ span the domain.
		 */
624 if (!sibling->child)
625 continue;
626
		/* If we would not end up here, we can't continue from here: */
628 if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
629 continue;
630
631 cpumask_set_cpu(i, mask);
632 }
633
	/* We must not have empty masks here: */
635 WARN_ON_ONCE(cpumask_empty(mask));
636}
637
/*
 * Build a sched_group covering the span of @sd's child domain, or of @sd
 * itself when there is no child; the group is allocated on @cpu's node.
 */
643static struct sched_group *
644build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
645{
646 struct sched_group *sg;
647 struct cpumask *sg_span;
648
649 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
650 GFP_KERNEL, cpu_to_node(cpu));
651
652 if (!sg)
653 return NULL;
654
655 sg_span = sched_group_span(sg);
656 if (sd->child)
657 cpumask_copy(sg_span, sched_domain_span(sd->child));
658 else
659 cpumask_copy(sg_span, sched_domain_span(sd));
660
661 atomic_inc(&sg->ref);
662 return sg;
663}
664
665static void init_overlap_sched_group(struct sched_domain *sd,
666 struct sched_group *sg)
667{
668 struct cpumask *mask = sched_domains_tmpmask2;
669 struct sd_data *sdd = sd->private;
670 struct cpumask *sg_span;
671 int cpu;
672
673 build_balance_mask(sd, sg, mask);
674 cpu = cpumask_first_and(sched_group_span(sg), mask);
675
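	/*
	 * Share the sched_group_capacity owned by the balance CPU; the first
	 * reference installs the balance mask, later references must see the
	 * exact same mask.
	 */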
676 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
677 if (atomic_inc_return(&sg->sgc->ref) == 1)
678 cpumask_copy(group_balance_mask(sg), mask);
679 else
680 WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no possible iteration will get us here, we won't
	 * die on a /0 trap.
	 */
687 sg_span = sched_group_span(sg);
688 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
689 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
690}
691
692static int
693build_overlap_sched_groups(struct sched_domain *sd, int cpu)
694{
695 struct sched_group *first = NULL, *last = NULL, *sg;
696 const struct cpumask *span = sched_domain_span(sd);
697 struct cpumask *covered = sched_domains_tmpmask;
698 struct sd_data *sdd = sd->private;
699 struct sched_domain *sibling;
700 int i;
701
702 cpumask_clear(covered);
703
704 for_each_cpu_wrap(i, span, cpu) {
705 struct cpumask *sg_span;
706
707 if (cpumask_test_cpu(i, covered))
708 continue;
709
710 sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth, make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * check that.
		 */
722 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
723 continue;
724
725 sg = build_group_from_child_sched_domain(sibling, cpu);
726 if (!sg)
727 goto fail;
728
729 sg_span = sched_group_span(sg);
730 cpumask_or(covered, covered, sg_span);
731
732 init_overlap_sched_group(sd, sg);
733
734 if (!first)
735 first = sg;
736 if (last)
737 last->next = sg;
738 last = sg;
739 last->next = first;
740 }
741 sd->groups = first;
742
743 return 0;
744
745fail:
746 free_sched_groups(first, 0);
747
748 return -ENOMEM;
749}
750
751
/*
 * Non-NUMA (package) levels are built without SD_OVERLAP: every CPU in a
 * domain sees the same domain span, and the domain's groups are simply its
 * child domains (or the individual CPUs at the lowest level).
 *
 * Each group and its sched_group_capacity live in the per-CPU sd_data slots
 * of the first CPU in the group's span.  get_group() looks that instance up,
 * takes references for claim_allocations(), and fills in the group's span,
 * balance mask and initial capacity; build_sched_groups() then links the
 * groups of a domain into a circular list.
 */
823static struct sched_group *get_group(int cpu, struct sd_data *sdd)
824{
825 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
826 struct sched_domain *child = sd->child;
827 struct sched_group *sg;
828
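	/*
	 * The group and its sched_group_capacity are the per-CPU instances
	 * belonging to the first CPU in the group's span: the child domain's
	 * first CPU, or @cpu itself at the lowest level.
	 */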
829 if (child)
830 cpu = cpumask_first(sched_domain_span(child));
831
832 sg = *per_cpu_ptr(sdd->sg, cpu);
833 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
834
	/* For claim_allocations(): */
836 atomic_inc(&sg->ref);
837 atomic_inc(&sg->sgc->ref);
838
839 if (child) {
840 cpumask_copy(sched_group_span(sg), sched_domain_span(child));
841 cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
842 } else {
843 cpumask_set_cpu(cpu, sched_group_span(sg));
844 cpumask_set_cpu(cpu, group_balance_mask(sg));
845 }
846
847 sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
848 sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
849
850 return sg;
851}
852
/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, will set each group's ->cpumask correctly,
 * and will initialize their ->sgc.
 *
 * Assumes the sched_domain tree is fully constructed.
 */
860static int
861build_sched_groups(struct sched_domain *sd, int cpu)
862{
863 struct sched_group *first = NULL, *last = NULL;
864 struct sd_data *sdd = sd->private;
865 const struct cpumask *span = sched_domain_span(sd);
866 struct cpumask *covered;
867 int i;
868
869 lockdep_assert_held(&sched_domains_mutex);
870 covered = sched_domains_tmpmask;
871
872 cpumask_clear(covered);
873
874 for_each_cpu_wrap(i, span, cpu) {
875 struct sched_group *sg;
876
877 if (cpumask_test_cpu(i, covered))
878 continue;
879
880 sg = get_group(i, sdd);
881
882 cpumask_or(covered, covered, sched_group_span(sg));
883
884 if (!first)
885 first = sg;
886 if (last)
887 last->next = sg;
888 last = sg;
889 }
890 last->next = first;
891 sd->groups = first;
892
893 return 0;
894}
895
/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_capacity for all the groups in a sched domain will be same
 * unless there are asymmetries in the topology. If there are asymmetries,
 * the group having more cpu_capacity will pick up more load compared to the
 * group having less cpu_capacity.
 */
906static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
907{
908 struct sched_group *sg = sd->groups;
909
910 WARN_ON(!sg);
911
912 do {
913 int cpu, max_cpu = -1;
914
915 sg->group_weight = cpumask_weight(sched_group_span(sg));
916
917 if (!(sd->flags & SD_ASYM_PACKING))
918 goto next;
919
920 for_each_cpu(cpu, sched_group_span(sg)) {
921 if (max_cpu < 0)
922 max_cpu = cpu;
923 else if (sched_asym_prefer(cpu, max_cpu))
924 max_cpu = cpu;
925 }
926 sg->asym_prefer_cpu = max_cpu;
927
928next:
929 sg = sg->next;
930 } while (sg != sd->groups);
931
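	/* Only the group's balance CPU computes the group capacity: */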
932 if (cpu != group_balance_cpu(sg))
933 return;
934
935 update_group_capacity(sd, cpu);
936}
937
938
/*
 * The 'relax_domain_level' boot parameter limits how high in the domain
 * hierarchy idle/wakeup balancing is allowed; see set_domain_attribute().
 */
943static int default_relax_domain_level = -1;
944int sched_domain_level_max;
945
946static int __init setup_relax_domain_level(char *str)
947{
948 if (kstrtoint(str, 0, &default_relax_domain_level))
949 pr_warn("Unable to set relax_domain_level\n");
950
951 return 1;
952}
953__setup("relax_domain_level=", setup_relax_domain_level);
954
955static void set_domain_attribute(struct sched_domain *sd,
956 struct sched_domain_attr *attr)
957{
958 int request;
959
960 if (!attr || attr->relax_domain_level < 0) {
961 if (default_relax_domain_level < 0)
962 return;
963 else
964 request = default_relax_domain_level;
965 } else
966 request = attr->relax_domain_level;
967 if (request < sd->level) {
		/* Turn off idle balance on this domain: */
969 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
970 } else {
		/* Turn on idle balance on this domain: */
972 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
973 }
974}
975
976static void __sdt_free(const struct cpumask *cpu_map);
977static int __sdt_alloc(const struct cpumask *cpu_map);
978
979static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
980 const struct cpumask *cpu_map)
981{
982 switch (what) {
983 case sa_rootdomain:
984 if (!atomic_read(&d->rd->refcount))
985 free_rootdomain(&d->rd->rcu);
		/* Fall through */
987 case sa_sd:
988 free_percpu(d->sd);
		/* Fall through */
990 case sa_sd_storage:
991 __sdt_free(cpu_map);
		/* Fall through */
993 case sa_none:
994 break;
995 }
996}
997
998static enum s_alloc
999__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
1000{
1001 memset(d, 0, sizeof(*d));
1002
1003 if (__sdt_alloc(cpu_map))
1004 return sa_sd_storage;
1005 d->sd = alloc_percpu(struct sched_domain *);
1006 if (!d->sd)
1007 return sa_sd_storage;
1008 d->rd = alloc_rootdomain();
1009 if (!d->rd)
1010 return sa_sd;
1011 return sa_rootdomain;
1012}
1013
/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
1019static void claim_allocations(int cpu, struct sched_domain *sd)
1020{
1021 struct sd_data *sdd = sd->private;
1022
1023 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
1024 *per_cpu_ptr(sdd->sd, cpu) = NULL;
1025
1026 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
1027 *per_cpu_ptr(sdd->sds, cpu) = NULL;
1028
1029 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
1030 *per_cpu_ptr(sdd->sg, cpu) = NULL;
1031
1032 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
1033 *per_cpu_ptr(sdd->sgc, cpu) = NULL;
1034}
1035
1036#ifdef CONFIG_NUMA
1037static int sched_domains_numa_levels;
1038enum numa_topology_type sched_numa_topology_type;
1039static int *sched_domains_numa_distance;
1040int sched_max_numa_distance;
1041static struct cpumask ***sched_domains_numa_masks;
1042static int sched_domains_curr_level;
1043#endif
1044
/*
 * SD_flags allowed in topology descriptions.
 *
 * These flags are purely descriptive of the topology and do not prescribe
 * behaviour. Behaviour is artificial and mapped in the below sd_init()
 * function:
 *
 *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
 *   SD_SHARE_PKG_RESOURCES - describes shared caches
 *   SD_NUMA                - describes NUMA topologies
 *   SD_SHARE_POWERDOMAIN   - describes shared power domain
 *   SD_ASYM_CPUCAPACITY    - describes mixed capacity topologies
 *
 * Odd one out, which besides describing the topology also prescribes the
 * desired behaviour that determines balancing:
 *
 *   SD_ASYM_PACKING        - describes SMT quirks
 */
1063#define TOPOLOGY_SD_FLAGS \
1064 (SD_SHARE_CPUCAPACITY | \
1065 SD_SHARE_PKG_RESOURCES | \
1066 SD_NUMA | \
1067 SD_ASYM_PACKING | \
1068 SD_ASYM_CPUCAPACITY | \
1069 SD_SHARE_POWERDOMAIN)
1070
1071static struct sched_domain *
1072sd_init(struct sched_domain_topology_level *tl,
1073 const struct cpumask *cpu_map,
1074 struct sched_domain *child, int cpu)
1075{
1076 struct sd_data *sdd = &tl->data;
1077 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1078 int sd_id, sd_weight, sd_flags = 0;
1079
1080#ifdef CONFIG_NUMA
	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
1084 sched_domains_curr_level = tl->numa_level;
1085#endif
1086
1087 sd_weight = cpumask_weight(tl->mask(cpu));
1088
1089 if (tl->sd_flags)
1090 sd_flags = (*tl->sd_flags)();
1091 if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
1092 "wrong sd_flags in topology description\n"))
1093 sd_flags &= ~TOPOLOGY_SD_FLAGS;
1094
1095 *sd = (struct sched_domain){
1096 .min_interval = sd_weight,
1097 .max_interval = 2*sd_weight,
1098 .busy_factor = 32,
1099 .imbalance_pct = 125,
1100
1101 .cache_nice_tries = 0,
1102 .busy_idx = 0,
1103 .idle_idx = 0,
1104 .newidle_idx = 0,
1105 .wake_idx = 0,
1106 .forkexec_idx = 0,
1107
1108 .flags = 1*SD_LOAD_BALANCE
1109 | 1*SD_BALANCE_NEWIDLE
1110 | 1*SD_BALANCE_EXEC
1111 | 1*SD_BALANCE_FORK
1112 | 0*SD_BALANCE_WAKE
1113 | 1*SD_WAKE_AFFINE
1114 | 0*SD_SHARE_CPUCAPACITY
1115 | 0*SD_SHARE_PKG_RESOURCES
1116 | 0*SD_SERIALIZE
1117 | 0*SD_PREFER_SIBLING
1118 | 0*SD_NUMA
1119 | sd_flags
1120 ,
1121
1122 .last_balance = jiffies,
1123 .balance_interval = sd_weight,
1124 .smt_gain = 0,
1125 .max_newidle_lb_cost = 0,
1126 .next_decay_max_lb_cost = jiffies,
1127 .child = child,
1128#ifdef CONFIG_SCHED_DEBUG
1129 .name = tl->name,
1130#endif
1131 };
1132
1133 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
1134 sd_id = cpumask_first(sched_domain_span(sd));
1135
	/*
	 * Convert topological properties into behaviour.
	 */
1140 if (sd->flags & SD_ASYM_CPUCAPACITY) {
1141 struct sched_domain *t = sd;
1142
1143 for_each_lower_domain(t)
1144 t->flags |= SD_BALANCE_WAKE;
1145 }
1146
1147 if (sd->flags & SD_SHARE_CPUCAPACITY) {
1148 sd->flags |= SD_PREFER_SIBLING;
1149 sd->imbalance_pct = 110;
1150 sd->smt_gain = 1178;
1151
1152 } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1153 sd->flags |= SD_PREFER_SIBLING;
1154 sd->imbalance_pct = 117;
1155 sd->cache_nice_tries = 1;
1156 sd->busy_idx = 2;
1157
1158#ifdef CONFIG_NUMA
1159 } else if (sd->flags & SD_NUMA) {
1160 sd->cache_nice_tries = 2;
1161 sd->busy_idx = 3;
1162 sd->idle_idx = 2;
1163
1164 sd->flags |= SD_SERIALIZE;
1165 if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
1166 sd->flags &= ~(SD_BALANCE_EXEC |
1167 SD_BALANCE_FORK |
1168 SD_WAKE_AFFINE);
1169 }
1170
1171#endif
1172 } else {
1173 sd->flags |= SD_PREFER_SIBLING;
1174 sd->cache_nice_tries = 1;
1175 sd->busy_idx = 2;
1176 sd->idle_idx = 1;
1177 }
1178
	/*
	 * For all levels sharing cache; connect a sched_domain_shared
	 * instance.
	 */
1183 if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1184 sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
1185 atomic_inc(&sd->shared->ref);
1186 atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
1187 }
1188
1189 sd->private = sdd;
1190
1191 return sd;
1192}
1193
/*
 * Topology list, bottom-up.
 */
1197static struct sched_domain_topology_level default_topology[] = {
1198#ifdef CONFIG_SCHED_SMT
1199 { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
1200#endif
1201#ifdef CONFIG_SCHED_MC
1202 { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
1203#endif
1204 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
1205 { NULL, },
1206};
1207
1208static struct sched_domain_topology_level *sched_domain_topology =
1209 default_topology;
1210
1211#define for_each_sd_topology(tl) \
1212 for (tl = sched_domain_topology; tl->mask; tl++)
1213
1214void set_sched_topology(struct sched_domain_topology_level *tl)
1215{
1216 if (WARN_ON_ONCE(sched_smp_initialized))
1217 return;
1218
1219 sched_domain_topology = tl;
1220}
1221
1222#ifdef CONFIG_NUMA
1223
1224static const struct cpumask *sd_numa_mask(int cpu)
1225{
1226 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
1227}
1228
1229static void sched_numa_warn(const char *str)
1230{
1231 static int done = false;
	int i, j;
1233
1234 if (done)
1235 return;
1236
1237 done = true;
1238
1239 printk(KERN_WARNING "ERROR: %s\n\n", str);
1240
1241 for (i = 0; i < nr_node_ids; i++) {
1242 printk(KERN_WARNING " ");
1243 for (j = 0; j < nr_node_ids; j++)
1244 printk(KERN_CONT "%02d ", node_distance(i,j));
1245 printk(KERN_CONT "\n");
1246 }
1247 printk(KERN_WARNING "\n");
1248}
1249
1250bool find_numa_distance(int distance)
1251{
1252 int i;
1253
1254 if (distance == node_distance(0, 0))
1255 return true;
1256
1257 for (i = 0; i < sched_domains_numa_levels; i++) {
1258 if (sched_domains_numa_distance[i] == distance)
1259 return true;
1260 }
1261
1262 return false;
1263}
1264
/*
 * A system can have three types of NUMA topology:
 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
 * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
 *
 * The difference between a glueless mesh topology and a backplane
 * topology lies in whether communication between not directly
 * connected nodes goes through intermediary nodes (where programs
 * could run), or through backplane controllers. This affects
 * placement of programs.
 *
 * The type of topology can be discerned with the following tests:
 * - If the maximum distance between any nodes is 1 hop, the system
 *   is directly connected.
 * - If for two nodes A and B, located N > 1 hops away from each other,
 *   there is an intermediary node C, which is < N hops away from both
 *   nodes A and B, the system is a glueless mesh.
 */
1284static void init_numa_topology_type(void)
1285{
1286 int a, b, c, n;
1287
1288 n = sched_max_numa_distance;
1289
1290 if (sched_domains_numa_levels <= 1) {
1291 sched_numa_topology_type = NUMA_DIRECT;
1292 return;
1293 }
1294
1295 for_each_online_node(a) {
1296 for_each_online_node(b) {
			/* Find two nodes furthest removed from each other: */
1298 if (node_distance(a, b) < n)
1299 continue;
1300
			/* Is there an intermediary node between a and b? */
1302 for_each_online_node(c) {
1303 if (node_distance(a, c) < n &&
1304 node_distance(b, c) < n) {
1305 sched_numa_topology_type =
1306 NUMA_GLUELESS_MESH;
1307 return;
1308 }
1309 }
1310
1311 sched_numa_topology_type = NUMA_BACKPLANE;
1312 return;
1313 }
1314 }
1315}
1316
1317void sched_init_numa(void)
1318{
1319 int next_distance, curr_distance = node_distance(0, 0);
1320 struct sched_domain_topology_level *tl;
1321 int level = 0;
1322 int i, j, k;
1323
1324 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
1325 if (!sched_domains_numa_distance)
1326 return;
1327
1328
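	/* Level 0 is the local (identity) distance, node_distance(0, 0): */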
1329 sched_domains_numa_distance[level++] = curr_distance;
1330 sched_domains_numa_levels = level;
1331
	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
1339 next_distance = curr_distance;
1340 for (i = 0; i < nr_node_ids; i++) {
1341 for (j = 0; j < nr_node_ids; j++) {
1342 for (k = 0; k < nr_node_ids; k++) {
1343 int distance = node_distance(i, k);
1344
1345 if (distance > curr_distance &&
1346 (distance < next_distance ||
1347 next_distance == curr_distance))
1348 next_distance = distance;

				/*
				 * While not a strong assumption it would be nice to
				 * know about cases where if node A is connected to B,
				 * B is not equally connected to A.
				 */
1355 if (sched_debug() && node_distance(k, i) != distance)
1356 sched_numa_warn("Node-distance not symmetric");
1357
1358 if (sched_debug() && i && !find_numa_distance(distance))
1359 sched_numa_warn("Node-0 not representative");
1360 }
1361 if (next_distance != curr_distance) {
1362 sched_domains_numa_distance[level++] = next_distance;
1363 sched_domains_numa_levels = level;
1364 curr_distance = next_distance;
1365 } else break;
1366 }
1367
		/*
		 * In case of sched_debug() we verify the above assumption.
		 */
1371 if (!sched_debug())
1372 break;
1373 }
1374
1375 if (!level)
1376 return;
1377
	/*
	 * 'level' contains the number of unique distances.
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Here, we should temporarily reset sched_domains_numa_levels to 0.
	 * If it fails to allocate memory for array sched_domains_numa_masks[][],
	 * the array will contain fewer than 'level' members. This could be
	 * dangerous when we use it to iterate array sched_domains_numa_masks[][]
	 * (in sched_domains_numa_masks_set() and sched_domains_numa_masks_clear()).
	 *
	 * We reset it to 'level' when we build 'sched_domains_numa_masks'.
	 */
1394 sched_domains_numa_levels = 0;
1395
1396 sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
1397 if (!sched_domains_numa_masks)
1398 return;
1399
	/*
	 * Now for each level, construct a mask per node which contains all
	 * CPUs of nodes that are that many hops away from us.
	 */
1404 for (i = 0; i < level; i++) {
1405 sched_domains_numa_masks[i] =
1406 kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
1407 if (!sched_domains_numa_masks[i])
1408 return;
1409
1410 for (j = 0; j < nr_node_ids; j++) {
1411 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
1412 if (!mask)
1413 return;
1414
1415 sched_domains_numa_masks[i][j] = mask;
1416
1417 for_each_node(k) {
1418 if (node_distance(j, k) > sched_domains_numa_distance[i])
1419 continue;
1420
1421 cpumask_or(mask, mask, cpumask_of_node(k));
1422 }
1423 }
1424 }
1425
	/* Compute default topology size: */
1427 for (i = 0; sched_domain_topology[i].mask; i++);
1428
1429 tl = kzalloc((i + level + 1) *
1430 sizeof(struct sched_domain_topology_level), GFP_KERNEL);
1431 if (!tl)
1432 return;
1433
	/*
	 * Copy the default topology bits..
	 */
1437 for (i = 0; sched_domain_topology[i].mask; i++)
1438 tl[i] = sched_domain_topology[i];
1439
	/*
	 * Add the NUMA identity distance, aka single NODE.
	 */
1443 tl[i++] = (struct sched_domain_topology_level){
1444 .mask = sd_numa_mask,
1445 .numa_level = 0,
1446 SD_INIT_NAME(NODE)
1447 };
1448
	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
1452 for (j = 1; j < level; i++, j++) {
1453 tl[i] = (struct sched_domain_topology_level){
1454 .mask = sd_numa_mask,
1455 .sd_flags = cpu_numa_flags,
1456 .flags = SDTL_OVERLAP,
1457 .numa_level = j,
1458 SD_INIT_NAME(NUMA)
1459 };
1460 }
1461
1462 sched_domain_topology = tl;
1463
1464 sched_domains_numa_levels = level;
1465 sched_max_numa_distance = sched_domains_numa_distance[level - 1];
1466
1467 init_numa_topology_type();
1468}
1469
1470void sched_domains_numa_masks_set(unsigned int cpu)
1471{
1472 int node = cpu_to_node(cpu);
1473 int i, j;
1474
1475 for (i = 0; i < sched_domains_numa_levels; i++) {
1476 for (j = 0; j < nr_node_ids; j++) {
1477 if (node_distance(j, node) <= sched_domains_numa_distance[i])
1478 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
1479 }
1480 }
1481}
1482
1483void sched_domains_numa_masks_clear(unsigned int cpu)
1484{
1485 int i, j;
1486
1487 for (i = 0; i < sched_domains_numa_levels; i++) {
1488 for (j = 0; j < nr_node_ids; j++)
1489 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
1490 }
1491}
1492
1493#endif
1494
1495static int __sdt_alloc(const struct cpumask *cpu_map)
1496{
1497 struct sched_domain_topology_level *tl;
1498 int j;
1499
1500 for_each_sd_topology(tl) {
1501 struct sd_data *sdd = &tl->data;
1502
1503 sdd->sd = alloc_percpu(struct sched_domain *);
1504 if (!sdd->sd)
1505 return -ENOMEM;
1506
1507 sdd->sds = alloc_percpu(struct sched_domain_shared *);
1508 if (!sdd->sds)
1509 return -ENOMEM;
1510
1511 sdd->sg = alloc_percpu(struct sched_group *);
1512 if (!sdd->sg)
1513 return -ENOMEM;
1514
1515 sdd->sgc = alloc_percpu(struct sched_group_capacity *);
1516 if (!sdd->sgc)
1517 return -ENOMEM;
1518
1519 for_each_cpu(j, cpu_map) {
1520 struct sched_domain *sd;
1521 struct sched_domain_shared *sds;
1522 struct sched_group *sg;
1523 struct sched_group_capacity *sgc;
1524
1525 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
1526 GFP_KERNEL, cpu_to_node(j));
1527 if (!sd)
1528 return -ENOMEM;
1529
1530 *per_cpu_ptr(sdd->sd, j) = sd;
1531
1532 sds = kzalloc_node(sizeof(struct sched_domain_shared),
1533 GFP_KERNEL, cpu_to_node(j));
1534 if (!sds)
1535 return -ENOMEM;
1536
1537 *per_cpu_ptr(sdd->sds, j) = sds;
1538
1539 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
1540 GFP_KERNEL, cpu_to_node(j));
1541 if (!sg)
1542 return -ENOMEM;
1543
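			/* Each group starts out as a circular list of one: */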
1544 sg->next = sg;
1545
1546 *per_cpu_ptr(sdd->sg, j) = sg;
1547
1548 sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
1549 GFP_KERNEL, cpu_to_node(j));
1550 if (!sgc)
1551 return -ENOMEM;
1552
1553#ifdef CONFIG_SCHED_DEBUG
1554 sgc->id = j;
1555#endif
1556
1557 *per_cpu_ptr(sdd->sgc, j) = sgc;
1558 }
1559 }
1560
1561 return 0;
1562}
1563
1564static void __sdt_free(const struct cpumask *cpu_map)
1565{
1566 struct sched_domain_topology_level *tl;
1567 int j;
1568
1569 for_each_sd_topology(tl) {
1570 struct sd_data *sdd = &tl->data;
1571
1572 for_each_cpu(j, cpu_map) {
1573 struct sched_domain *sd;
1574
1575 if (sdd->sd) {
1576 sd = *per_cpu_ptr(sdd->sd, j);
1577 if (sd && (sd->flags & SD_OVERLAP))
1578 free_sched_groups(sd->groups, 0);
1579 kfree(*per_cpu_ptr(sdd->sd, j));
1580 }
1581
1582 if (sdd->sds)
1583 kfree(*per_cpu_ptr(sdd->sds, j));
1584 if (sdd->sg)
1585 kfree(*per_cpu_ptr(sdd->sg, j));
1586 if (sdd->sgc)
1587 kfree(*per_cpu_ptr(sdd->sgc, j));
1588 }
1589 free_percpu(sdd->sd);
1590 sdd->sd = NULL;
1591 free_percpu(sdd->sds);
1592 sdd->sds = NULL;
1593 free_percpu(sdd->sg);
1594 sdd->sg = NULL;
1595 free_percpu(sdd->sgc);
1596 sdd->sgc = NULL;
1597 }
1598}
1599
1600static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
1601 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
1602 struct sched_domain *child, int cpu)
1603{
1604 struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
1605
1606 if (child) {
1607 sd->level = child->level + 1;
1608 sched_domain_level_max = max(sched_domain_level_max, sd->level);
1609 child->parent = sd;
1610
1611 if (!cpumask_subset(sched_domain_span(child),
1612 sched_domain_span(sd))) {
1613 pr_err("BUG: arch topology borken\n");
1614#ifdef CONFIG_SCHED_DEBUG
1615 pr_err(" the %s domain not a subset of the %s domain\n",
1616 child->name, sd->name);
1617#endif
			/* Fixup, ensure @sd has at least @child CPUs: */
1619 cpumask_or(sched_domain_span(sd),
1620 sched_domain_span(sd),
1621 sched_domain_span(child));
1622 }
1623
	}
1625 set_domain_attribute(sd, attr);
1626
1627 return sd;
1628}
1629
/*
 * Build sched domains for a given set of CPUs and attach the sched domains
 * to the individual CPUs.
 */
1634static int
1635build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
1636{
1637 enum s_alloc alloc_state;
1638 struct sched_domain *sd;
1639 struct s_data d;
1640 struct rq *rq = NULL;
1641 int i, ret = -ENOMEM;
1642
1643 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
1644 if (alloc_state != sa_rootdomain)
1645 goto error;
1646
	/* Set up domains for CPUs specified by the cpu_map: */
1648 for_each_cpu(i, cpu_map) {
1649 struct sched_domain_topology_level *tl;
1650
1651 sd = NULL;
1652 for_each_sd_topology(tl) {
1653 sd = build_sched_domain(tl, cpu_map, attr, sd, i);
1654 if (tl == sched_domain_topology)
1655 *per_cpu_ptr(d.sd, i) = sd;
1656 if (tl->flags & SDTL_OVERLAP)
1657 sd->flags |= SD_OVERLAP;
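			/* Stop stacking levels once a domain spans the whole cpu_map: */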
1658 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
1659 break;
1660 }
1661 }
1662
	/* Build the groups for the domains: */
1664 for_each_cpu(i, cpu_map) {
1665 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
1666 sd->span_weight = cpumask_weight(sched_domain_span(sd));
1667 if (sd->flags & SD_OVERLAP) {
1668 if (build_overlap_sched_groups(sd, i))
1669 goto error;
1670 } else {
1671 if (build_sched_groups(sd, i))
1672 goto error;
1673 }
1674 }
1675 }
1676
	/* Calculate CPU capacity for physical packages and nodes: */
1678 for (i = nr_cpumask_bits-1; i >= 0; i--) {
1679 if (!cpumask_test_cpu(i, cpu_map))
1680 continue;
1681
1682 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
1683 claim_allocations(i, sd);
1684 init_sched_groups_capacity(i, sd);
1685 }
1686 }
1687
	/* Attach the domains: */
1689 rcu_read_lock();
1690 for_each_cpu(i, cpu_map) {
1691 rq = cpu_rq(i);
1692 sd = *per_cpu_ptr(d.sd, i);
1693
		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
1695 if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
1696 WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
1697
1698 cpu_attach_domain(sd, d.rd, i);
1699 }
1700 rcu_read_unlock();
1701
1702 if (rq && sched_debug_enabled) {
1703 pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
1704 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
1705 }
1706
1707 ret = 0;
1708error:
1709 __free_domain_allocs(&d, alloc_state, cpu_map);
1710 return ret;
1711}
1712
/* Current sched domains: */
1714static cpumask_var_t *doms_cur;
1715
/* Number of sched domains in 'doms_cur': */
1717static int ndoms_cur;
1718
/* Attributes of custom domains in 'doms_cur': */
1720static struct sched_domain_attr *dattr_cur;
1721
/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumask) fails, then fallback to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
1727static cpumask_var_t fallback_doms;
1728
/*
 * arch_update_cpu_topology() lets virtualized architectures update the
 * CPU core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
1734int __weak arch_update_cpu_topology(void)
1735{
1736 return 0;
1737}
1738
1739cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
1740{
1741 int i;
1742 cpumask_var_t *doms;
1743
1744 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
1745 if (!doms)
1746 return NULL;
1747 for (i = 0; i < ndoms; i++) {
1748 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
1749 free_sched_domains(doms, i);
1750 return NULL;
1751 }
1752 }
1753 return doms;
1754}
1755
1756void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
1757{
1758 unsigned int i;
1759 for (i = 0; i < ndoms; i++)
1760 free_cpumask_var(doms[i]);
1761 kfree(doms);
1762}
1763
/*
 * Set up scheduler domains and groups.  For now this just excludes isolated
 * CPUs, but could be used to exclude other special cases in the future.
 */
1769int sched_init_domains(const struct cpumask *cpu_map)
1770{
1771 int err;
1772
1773 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
1774 zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
1775 zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
1776
1777 arch_update_cpu_topology();
1778 ndoms_cur = 1;
1779 doms_cur = alloc_sched_domains(ndoms_cur);
1780 if (!doms_cur)
1781 doms_cur = &fallback_doms;
1782 cpumask_and(doms_cur[0], cpu_map, housekeeping_cpumask(HK_FLAG_DOMAIN));
1783 err = build_sched_domains(doms_cur[0], NULL);
1784 register_sched_domain_sysctl();
1785
1786 return err;
1787}
1788
/*
 * Detach sched domains from a group of CPUs specified in cpu_map.
 * These CPUs will now be attached to the NULL domain.
 */
1793static void detach_destroy_domains(const struct cpumask *cpu_map)
1794{
1795 int i;
1796
1797 rcu_read_lock();
1798 for_each_cpu(i, cpu_map)
1799 cpu_attach_domain(NULL, &def_root_domain, i);
1800 rcu_read_unlock();
1801}
1802
/* Handle NULL as "default": */
1804static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
1805 struct sched_domain_attr *new, int idx_new)
1806{
1807 struct sched_domain_attr tmp;
1808
	/* Fast path: */
1810 if (!new && !cur)
1811 return 1;
1812
1813 tmp = SD_ATTR_INIT;
1814 return !memcmp(cur ? (cur + idx_cur) : &tmp,
1815 new ? (new + idx_new) : &tmp,
1816 sizeof(struct sched_domain_attr));
1817}
1818
/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should setup one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains.  This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held.
 */
1845void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1846 struct sched_domain_attr *dattr_new)
1847{
1848 int i, j, n;
1849 int new_topology;
1850
1851 mutex_lock(&sched_domains_mutex);
1852
	/* Always unregister in case we don't destroy any domains: */
1854 unregister_sched_domain_sysctl();
1855
	/* Let the architecture update CPU core mappings: */
1857 new_topology = arch_update_cpu_topology();
1858
1859 if (!doms_new) {
1860 WARN_ON_ONCE(dattr_new);
1861 n = 0;
1862 doms_new = alloc_sched_domains(1);
1863 if (doms_new) {
1864 n = 1;
1865 cpumask_and(doms_new[0], cpu_active_mask,
1866 housekeeping_cpumask(HK_FLAG_DOMAIN));
1867 }
1868 } else {
1869 n = ndoms_new;
1870 }
1871
	/* Destroy deleted domains: */
1873 for (i = 0; i < ndoms_cur; i++) {
1874 for (j = 0; j < n && !new_topology; j++) {
1875 if (cpumask_equal(doms_cur[i], doms_new[j])
1876 && dattrs_equal(dattr_cur, i, dattr_new, j))
1877 goto match1;
1878 }
		/* No match - a current sched domain not in new doms_new[]: */
1880 detach_destroy_domains(doms_cur[i]);
1881match1:
1882 ;
1883 }
1884
1885 n = ndoms_cur;
1886 if (!doms_new) {
1887 n = 0;
1888 doms_new = &fallback_doms;
1889 cpumask_and(doms_new[0], cpu_active_mask,
1890 housekeeping_cpumask(HK_FLAG_DOMAIN));
1891 }
1892
	/* Build new domains: */
1894 for (i = 0; i < ndoms_new; i++) {
1895 for (j = 0; j < n && !new_topology; j++) {
1896 if (cpumask_equal(doms_new[i], doms_cur[j])
1897 && dattrs_equal(dattr_new, i, dattr_cur, j))
1898 goto match2;
1899 }
		/* No match - add a new doms_new: */
1901 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
1902match2:
1903 ;
1904 }
1905
	/* Remember the new sched domains: */
1907 if (doms_cur != &fallback_doms)
1908 free_sched_domains(doms_cur, ndoms_cur);
1909
1910 kfree(dattr_cur);
1911 doms_cur = doms_new;
1912 dattr_cur = dattr_new;
1913 ndoms_cur = ndoms_new;
1914
1915 register_sched_domain_sysctl();
1916
1917 mutex_unlock(&sched_domains_mutex);
1918}
1919
1920