/*
 * Scheduler topology setup/handling methods
 */
#include <linux/sched.h>
#include <linux/mutex.h>

#include "sched.h"

DEFINE_MUTEX(sched_domains_mutex);

/* Protected by sched_domains_mutex: */
cpumask_var_t sched_domains_tmpmask;
cpumask_var_t sched_domains_tmpmask2;

#ifdef CONFIG_SCHED_DEBUG

static int __init sched_debug_setup(char *str)
{
	sched_debug_enabled = true;

	return 0;
}
early_param("sched_debug", sched_debug_setup);

static inline bool sched_debug(void)
{
	return sched_debug_enabled;
}

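/*
 * Print one level of the sched_domain hierarchy for @cpu and flag common
 * construction errors (NULL/empty groups, repeated CPUs, span mismatches).
 */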
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
				  struct cpumask *groupmask)
{
	struct sched_group *group = sd->groups;

	cpumask_clear(groupmask);

	printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);

	if (!(sd->flags & SD_LOAD_BALANCE)) {
		printk("does not load-balance\n");
		if (sd->parent)
			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
		return -1;
	}

	printk(KERN_CONT "span=%*pbl level=%s\n",
	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
	}
	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
	}

	printk(KERN_DEBUG "%*s groups:", level + 1, "");
	do {
		if (!group) {
			printk("\n");
			printk(KERN_ERR "ERROR: group is NULL\n");
			break;
		}

		if (!cpumask_weight(sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: empty group\n");
			break;
		}

		if (!(sd->flags & SD_OVERLAP) &&
		    cpumask_intersects(groupmask, sched_group_span(group))) {
			printk(KERN_CONT "\n");
			printk(KERN_ERR "ERROR: repeated CPUs\n");
			break;
		}

		cpumask_or(groupmask, groupmask, sched_group_span(group));

		printk(KERN_CONT " %d:{ span=%*pbl",
		       group->sgc->id,
		       cpumask_pr_args(sched_group_span(group)));

		if ((sd->flags & SD_OVERLAP) &&
		    !cpumask_equal(group_balance_mask(group), sched_group_span(group))) {
			printk(KERN_CONT " mask=%*pbl",
			       cpumask_pr_args(group_balance_mask(group)));
		}

		if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
			printk(KERN_CONT " cap=%lu", group->sgc->capacity);

		if (group == sd->groups && sd->child &&
		    !cpumask_equal(sched_domain_span(sd->child),
				   sched_group_span(group))) {
			printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
		}

		printk(KERN_CONT " }");

		group = group->next;

		if (group != sd->groups)
			printk(KERN_CONT ",");

	} while (group != sd->groups);
	printk(KERN_CONT "\n");

	if (!cpumask_equal(sched_domain_span(sd), groupmask))
		printk(KERN_ERR "ERROR: groups don't span domain->span\n");

	if (sd->parent &&
	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
	return 0;
}

static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
	int level = 0;

	if (!sched_debug_enabled)
		return;

	if (!sd) {
		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
		return;
	}

	printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);

	for (;;) {
		if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
			break;
		level++;
		sd = sd->parent;
		if (!sd)
			break;
	}
}
#else /* !CONFIG_SCHED_DEBUG */

# define sched_debug_enabled 0
# define sched_domain_debug(sd, cpu) do { } while (0)
static inline bool sched_debug(void)
{
	return false;
}
#endif /* CONFIG_SCHED_DEBUG */

static int sd_degenerate(struct sched_domain *sd)
{
	if (cpumask_weight(sched_domain_span(sd)) == 1)
		return 1;

	/* Following flags need at least 2 groups: */
	if (sd->flags & (SD_LOAD_BALANCE |
			 SD_BALANCE_NEWIDLE |
			 SD_BALANCE_FORK |
			 SD_BALANCE_EXEC |
			 SD_SHARE_CPUCAPACITY |
			 SD_ASYM_CPUCAPACITY |
			 SD_SHARE_PKG_RESOURCES |
			 SD_SHARE_POWERDOMAIN)) {
		if (sd->groups != sd->groups->next)
			return 0;
	}

	/* Following flags don't use groups: */
	if (sd->flags & (SD_WAKE_AFFINE))
		return 0;

	return 1;
}

static int
sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
{
	unsigned long cflags = sd->flags, pflags = parent->flags;

	if (sd_degenerate(parent))
		return 1;

	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
		return 0;

	/* Flags needing groups don't count if only 1 group in parent: */
	if (parent->groups == parent->groups->next) {
		pflags &= ~(SD_LOAD_BALANCE |
			    SD_BALANCE_NEWIDLE |
			    SD_BALANCE_FORK |
			    SD_BALANCE_EXEC |
			    SD_ASYM_CPUCAPACITY |
			    SD_SHARE_CPUCAPACITY |
			    SD_SHARE_PKG_RESOURCES |
			    SD_PREFER_SIBLING |
			    SD_SHARE_POWERDOMAIN);
		if (nr_node_ids == 1)
			pflags &= ~SD_SERIALIZE;
	}
	if (~cflags & pflags)
		return 0;

	return 1;
}
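
/* Release the cpupri/cpudl state and cpumasks of a root_domain and free it. */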
static void free_rootdomain(struct rcu_head *rcu)
{
	struct root_domain *rd = container_of(rcu, struct root_domain, rcu);

	cpupri_cleanup(&rd->cpupri);
	cpudl_cleanup(&rd->cpudl);
	free_cpumask_var(rd->dlo_mask);
	free_cpumask_var(rd->rto_mask);
	free_cpumask_var(rd->online);
	free_cpumask_var(rd->span);
	kfree(rd);
}

void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}

/*
 * By default the system creates a single root-domain with all CPUs as
 * members (mimicking the global state we have today).
 */
struct root_domain def_root_domain;

void init_defrootdomain(void)
{
	init_rootdomain(&def_root_domain);

	atomic_set(&def_root_domain.refcount, 1);
}

static struct root_domain *alloc_rootdomain(void)
{
	struct root_domain *rd;

	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	if (init_rootdomain(rd) != 0) {
		kfree(rd);
		return NULL;
	}

	return rd;
}
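
/*
 * Walk the circular list of groups, dropping the group and (optionally) the
 * capacity references, and free each object whose refcount reaches zero.
 */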
static void free_sched_groups(struct sched_group *sg, int free_sgc)
{
	struct sched_group *tmp, *first;

	if (!sg)
		return;

	first = sg;
	do {
		tmp = sg->next;

		if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
			kfree(sg->sgc);

		if (atomic_dec_and_test(&sg->ref))
			kfree(sg);
		sg = tmp;
	} while (sg != first);
}

static void destroy_sched_domain(struct sched_domain *sd)
{
	/*
	 * A normal sched domain may have multiple group references, an
	 * overlapping domain, having private groups, only one.  Iterate,
	 * dropping group/capacity references, freeing where none remain.
	 */
	free_sched_groups(sd->groups, 1);

	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
		kfree(sd->shared);
	kfree(sd);
}

static void destroy_sched_domains_rcu(struct rcu_head *rcu)
{
	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);

	while (sd) {
		struct sched_domain *parent = sd->parent;
		destroy_sched_domain(sd);
		sd = parent;
	}
}

static void destroy_sched_domains(struct sched_domain *sd)
{
	if (sd)
		call_rcu(&sd->rcu, destroy_sched_domains_rcu);
}

/*
 * Keep a special pointer to the highest sched_domain that has
 * SD_SHARE_PKG_RESOURCES set (the Last Level Cache domain); this allows us
 * to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first CPU number in the
 * cpumask of the domain), this allows us to quickly tell if two CPUs are in
 * the same cache domain; see cpus_share_cache().
 */
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
DEFINE_PER_CPU(struct sched_domain *, sd_asym);

static void update_top_cache_domain(int cpu)
{
	struct sched_domain_shared *sds = NULL;
	struct sched_domain *sd;
	int id = cpu;
	int size = 1;

	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
	if (sd) {
		id = cpumask_first(sched_domain_span(sd));
		size = cpumask_weight(sched_domain_span(sd));
		sds = sd->shared;
	}

	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
	per_cpu(sd_llc_size, cpu) = size;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);

	sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
}

/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling: */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}

/* Setup the mask of CPUs configured for isolated domains */
static int __init isolated_cpu_setup(char *str)
{
	int ret;

	alloc_bootmem_cpumask_var(&cpu_isolated_map);
	ret = cpulist_parse(str, cpu_isolated_map);
	if (ret) {
		pr_err("sched: Error, all isolcpus= values must be between 0 and %u\n", nr_cpu_ids);
		return 0;
	}
	return 1;
}
__setup("isolcpus=", isolated_cpu_setup);

struct s_data {
	struct sched_domain ** __percpu sd;
	struct root_domain	*rd;
};

enum s_alloc {
	sa_rootdomain,
	sa_sd,
	sa_sd_storage,
	sa_none,
};

/*
 * Return the canonical balance CPU for this group, this is the first CPU
 * of this group that's also in the balance mask.
 *
 * The balance mask are all those CPUs that could actually end up at this
 * group. See build_balance_mask().
 *
 * Also see should_we_balance().
 */
int group_balance_cpu(struct sched_group *sg)
{
	return cpumask_first(group_balance_mask(sg));
}
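
/*
 * Build the balance mask for @sg: the CPUs in the group's span whose child
 * domain at this topology level spans exactly the same CPUs as the group.
 * Only those CPUs may act as the balance CPU for the group; with overlapping
 * (SD_OVERLAP/NUMA) domains, the other CPUs in the span see a different
 * group decomposition and must not balance on this group's behalf.
 */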
static void
build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
{
	const struct cpumask *sg_span = sched_group_span(sg);
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(mask);

	for_each_cpu(i, sg_span) {
		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Can happen in the asymmetric case, where these siblings are
		 * unused. The mask will not be empty because those CPUs that
		 * do have the top domain arrive here.
		 */
		if (!sibling->child)
			continue;

		/* If we would not end up here, we can't continue from here: */
		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
			continue;

		cpumask_set_cpu(i, mask);
	}

	/* We must not have empty masks here */
	WARN_ON_ONCE(cpumask_empty(mask));
}
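
/*
 * Allocate a new sched_group on @cpu's node and initialize its span from
 * @sd's child domain, or from @sd itself at the lowest topology level.
 */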
static struct sched_group *
build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
{
	struct sched_group *sg;
	struct cpumask *sg_span;

	sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
			  GFP_KERNEL, cpu_to_node(cpu));

	if (!sg)
		return NULL;

	sg_span = sched_group_span(sg);
	if (sd->child)
		cpumask_copy(sg_span, sched_domain_span(sd->child));
	else
		cpumask_copy(sg_span, sched_domain_span(sd));

	atomic_inc(&sg->ref);
	return sg;
}

static void init_overlap_sched_group(struct sched_domain *sd,
				     struct sched_group *sg)
{
	struct cpumask *mask = sched_domains_tmpmask2;
	struct sd_data *sdd = sd->private;
	struct cpumask *sg_span;
	int cpu;

	build_balance_mask(sd, sg, mask);
	cpu = cpumask_first_and(sched_group_span(sg), mask);

	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
	if (atomic_inc_return(&sg->sgc->ref) == 1)
		cpumask_copy(group_balance_mask(sg), mask);
	else
		WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

	/*
	 * Initialize sgc->capacity such that even if we mess up the
	 * domains and no possible iteration will get us here, we won't
	 * die on a /0 trap.
	 */
	sg_span = sched_group_span(sg);
	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
}

static int
build_overlap_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL, *sg;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered = sched_domains_tmpmask;
	struct sd_data *sdd = sd->private;
	struct sched_domain *sibling;
	int i;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct cpumask *sg_span;

		if (cpumask_test_cpu(i, covered))
			continue;

		sibling = *per_cpu_ptr(sdd->sd, i);

		/*
		 * Asymmetric node setups can result in situations where the
		 * domain tree is of unequal depth, make sure to skip domains
		 * that already cover the entire range.
		 *
		 * In that case build_sched_domains() will have terminated the
		 * iteration early and our sibling sd spans will be empty.
		 * Domains should always include the CPU they're built on, so
		 * check that.
		 */
		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
			continue;

		sg = build_group_from_child_sched_domain(sibling, cpu);
		if (!sg)
			goto fail;

		sg_span = sched_group_span(sg);
		cpumask_or(covered, covered, sg_span);

		init_overlap_sched_group(sd, sg);

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
		last->next = first;
	}
	sd->groups = first;

	return 0;

fail:
	free_sched_groups(first, 0);

	return -ENOMEM;
}
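
/*
 * Grab the pre-allocated sched_group and sched_group_capacity backing the
 * domain built from @sdd at @cpu and take a reference on each, marking them
 * in-use for claim_allocations().  The group span is the child domain's span
 * (or just @cpu at the lowest level) and, for these non-overlapping groups,
 * the balance mask equals the group span.
 */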
static struct sched_group *get_group(int cpu, struct sd_data *sdd)
{
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	struct sched_domain *child = sd->child;
	struct sched_group *sg;

	if (child)
		cpu = cpumask_first(sched_domain_span(child));

	sg = *per_cpu_ptr(sdd->sg, cpu);
	sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);

	/* For claim_allocations: */
	atomic_inc(&sg->ref);
	atomic_inc(&sg->sgc->ref);

	if (child) {
		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
	} else {
		cpumask_set_cpu(cpu, sched_group_span(sg));
		cpumask_set_cpu(cpu, group_balance_mask(sg));
	}

	sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_span(sg));
	sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;

	return sg;
}

/*
 * build_sched_groups will build a circular linked list of the groups
 * covered by the given span, and will set each group's ->cpumask correctly,
 * and ->cpu_capacity to SCHED_CAPACITY_SCALE.
 *
 * Assumes the sched_domain tree is fully constructed
 */
static int
build_sched_groups(struct sched_domain *sd, int cpu)
{
	struct sched_group *first = NULL, *last = NULL;
	struct sd_data *sdd = sd->private;
	const struct cpumask *span = sched_domain_span(sd);
	struct cpumask *covered;
	int i;

	lockdep_assert_held(&sched_domains_mutex);
	covered = sched_domains_tmpmask;

	cpumask_clear(covered);

	for_each_cpu_wrap(i, span, cpu) {
		struct sched_group *sg;

		if (cpumask_test_cpu(i, covered))
			continue;

		sg = get_group(i, sdd);

		cpumask_or(covered, covered, sched_group_span(sg));

		if (!first)
			first = sg;
		if (last)
			last->next = sg;
		last = sg;
	}
	last->next = first;
	sd->groups = first;

	return 0;
}

/*
 * Initialize sched groups cpu_capacity.
 *
 * cpu_capacity indicates the capacity of sched group, which is used while
 * distributing the load between different sched groups in a sched domain.
 * Typically cpu_capacity for all the groups in a sched domain will be same
 * unless there are asymmetries in the topology. If there are asymmetries,
 * group having more cpu_capacity will pick up more load compared to the
 * group having less cpu_capacity.
 */
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
	struct sched_group *sg = sd->groups;

	WARN_ON(!sg);

	do {
		int cpu, max_cpu = -1;

		sg->group_weight = cpumask_weight(sched_group_span(sg));

		if (!(sd->flags & SD_ASYM_PACKING))
			goto next;

		for_each_cpu(cpu, sched_group_span(sg)) {
			if (max_cpu < 0)
				max_cpu = cpu;
			else if (sched_asym_prefer(cpu, max_cpu))
				max_cpu = cpu;
		}
		sg->asym_prefer_cpu = max_cpu;

next:
		sg = sg->next;
	} while (sg != sd->groups);

	if (cpu != group_balance_cpu(sg))
		return;

	update_group_capacity(sd, cpu);
}

/*
 * Initializers for schedule domains
 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
 */

static int default_relax_domain_level = -1;
int sched_domain_level_max;

static int __init setup_relax_domain_level(char *str)
{
	if (kstrtoint(str, 0, &default_relax_domain_level))
		pr_warn("Unable to set relax_domain_level\n");

	return 1;
}
__setup("relax_domain_level=", setup_relax_domain_level);

static void set_domain_attribute(struct sched_domain *sd,
				 struct sched_domain_attr *attr)
{
	int request;

	if (!attr || attr->relax_domain_level < 0) {
		if (default_relax_domain_level < 0)
			return;
		else
			request = default_relax_domain_level;
	} else
		request = attr->relax_domain_level;
	if (request < sd->level) {
		/* Turn off idle balance on this domain: */
		sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	} else {
		/* Turn on idle balance on this domain: */
		sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
	}
}

static void __sdt_free(const struct cpumask *cpu_map);
static int __sdt_alloc(const struct cpumask *cpu_map);

static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
{
	switch (what) {
	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
			free_rootdomain(&d->rd->rcu);
		/* Fall through */
	case sa_sd:
		free_percpu(d->sd);
		/* Fall through */
	case sa_sd_storage:
		__sdt_free(cpu_map);
		/* Fall through */
	case sa_none:
		break;
	}
}

static enum s_alloc
__visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
{
	memset(d, 0, sizeof(*d));

	if (__sdt_alloc(cpu_map))
		return sa_sd_storage;
	d->sd = alloc_percpu(struct sched_domain *);
	if (!d->sd)
		return sa_sd_storage;
	d->rd = alloc_rootdomain();
	if (!d->rd)
		return sa_sd;
	return sa_rootdomain;
}

/*
 * NULL the sd_data elements we've used to build the sched_domain and
 * sched_group structure so that the subsequent __free_domain_allocs()
 * will not free the data we're using.
 */
static void claim_allocations(int cpu, struct sched_domain *sd)
{
	struct sd_data *sdd = sd->private;

	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
	*per_cpu_ptr(sdd->sd, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
		*per_cpu_ptr(sdd->sds, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
		*per_cpu_ptr(sdd->sg, cpu) = NULL;

	if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
		*per_cpu_ptr(sdd->sgc, cpu) = NULL;
}

#ifdef CONFIG_NUMA
static int sched_domains_numa_levels;
enum numa_topology_type sched_numa_topology_type;
static int *sched_domains_numa_distance;
int sched_max_numa_distance;
static struct cpumask ***sched_domains_numa_masks;
static int sched_domains_curr_level;
#endif

/*
 * SD_flags allowed in topology descriptions.
 *
 * These flags are purely descriptive of the topology and do not prescribe
 * behaviour. Behaviour is artificial and mapped in the below sd_init()
 * function:
 *
 *   SD_SHARE_CPUCAPACITY   - describes SMT topologies
 *   SD_SHARE_PKG_RESOURCES - describes shared caches
 *   SD_NUMA                - describes NUMA topologies
 *   SD_SHARE_POWERDOMAIN   - describes shared power domain
 *   SD_ASYM_CPUCAPACITY    - describes mixed capacity topologies
 *
 * Odd one out, which besides describing the topology has a quirk also
 * prescribes the desired behaviour that goes along with it:
 *
 *   SD_ASYM_PACKING        - describes SMT quirks
 */
#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY	|	\
	 SD_SHARE_PKG_RESOURCES	|	\
	 SD_NUMA		|	\
	 SD_ASYM_PACKING	|	\
	 SD_ASYM_CPUCAPACITY	|	\
	 SD_SHARE_POWERDOMAIN)

static struct sched_domain *
sd_init(struct sched_domain_topology_level *tl,
	const struct cpumask *cpu_map,
	struct sched_domain *child, int cpu)
{
	struct sd_data *sdd = &tl->data;
	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
	int sd_id, sd_weight, sd_flags = 0;

#ifdef CONFIG_NUMA
	/*
	 * Ugly hack to pass state to sd_numa_mask()...
	 */
	sched_domains_curr_level = tl->numa_level;
#endif

	sd_weight = cpumask_weight(tl->mask(cpu));

	if (tl->sd_flags)
		sd_flags = (*tl->sd_flags)();
	if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
			"wrong sd_flags in topology description\n"))
		sd_flags &= ~TOPOLOGY_SD_FLAGS;

	*sd = (struct sched_domain){
		.min_interval		= sd_weight,
		.max_interval		= 2*sd_weight,
		.busy_factor		= 32,
		.imbalance_pct		= 125,

		.cache_nice_tries	= 0,
		.busy_idx		= 0,
		.idle_idx		= 0,
		.newidle_idx		= 0,
		.wake_idx		= 0,
		.forkexec_idx		= 0,

		.flags			= 1*SD_LOAD_BALANCE
					| 1*SD_BALANCE_NEWIDLE
					| 1*SD_BALANCE_EXEC
					| 1*SD_BALANCE_FORK
					| 0*SD_BALANCE_WAKE
					| 1*SD_WAKE_AFFINE
					| 0*SD_SHARE_CPUCAPACITY
					| 0*SD_SHARE_PKG_RESOURCES
					| 0*SD_SERIALIZE
					| 0*SD_PREFER_SIBLING
					| 0*SD_NUMA
					| sd_flags
					,

		.last_balance		= jiffies,
		.balance_interval	= sd_weight,
		.smt_gain		= 0,
		.max_newidle_lb_cost	= 0,
		.next_decay_max_lb_cost	= jiffies,
		.child			= child,
#ifdef CONFIG_SCHED_DEBUG
		.name			= tl->name,
#endif
	};

	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
	sd_id = cpumask_first(sched_domain_span(sd));

	/*
	 * Convert topological properties into behaviour.
	 */
	if (sd->flags & SD_ASYM_CPUCAPACITY) {
		struct sched_domain *t = sd;

		for_each_lower_domain(t)
			t->flags |= SD_BALANCE_WAKE;
	}

	if (sd->flags & SD_SHARE_CPUCAPACITY) {
		sd->flags |= SD_PREFER_SIBLING;
		sd->imbalance_pct = 110;
		sd->smt_gain = 1178; /* ~15% */

	} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->imbalance_pct = 117;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;

#ifdef CONFIG_NUMA
	} else if (sd->flags & SD_NUMA) {
		sd->cache_nice_tries = 2;
		sd->busy_idx = 3;
		sd->idle_idx = 2;

		sd->flags |= SD_SERIALIZE;
		if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
			sd->flags &= ~(SD_BALANCE_EXEC |
				       SD_BALANCE_FORK |
				       SD_WAKE_AFFINE);
		}

#endif
	} else {
		sd->flags |= SD_PREFER_SIBLING;
		sd->cache_nice_tries = 1;
		sd->busy_idx = 2;
		sd->idle_idx = 1;
	}

	/*
	 * For all levels sharing cache; connect a sched_domain_shared
	 * instance.
	 */
	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
		atomic_inc(&sd->shared->ref);
		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
	}

	sd->private = sdd;

	return sd;
}

/*
 * Topology list, bottom-up.
 */
static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static struct sched_domain_topology_level *sched_domain_topology =
	default_topology;

#define for_each_sd_topology(tl)			\
	for (tl = sched_domain_topology; tl->mask; tl++)

void set_sched_topology(struct sched_domain_topology_level *tl)
{
	if (WARN_ON_ONCE(sched_smp_initialized))
		return;

	sched_domain_topology = tl;
}

#ifdef CONFIG_NUMA

static const struct cpumask *sd_numa_mask(int cpu)
{
	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
}

static void sched_numa_warn(const char *str)
{
	static int done = false;
	int i, j;

	if (done)
		return;

	done = true;

	printk(KERN_WARNING "ERROR: %s\n\n", str);

	for (i = 0; i < nr_node_ids; i++) {
		printk(KERN_WARNING "  ");
		for (j = 0; j < nr_node_ids; j++)
			printk(KERN_CONT "%02d ", node_distance(i, j));
		printk(KERN_CONT "\n");
	}
	printk(KERN_WARNING "\n");
}

bool find_numa_distance(int distance)
{
	int i;

	if (distance == node_distance(0, 0))
		return true;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		if (sched_domains_numa_distance[i] == distance)
			return true;
	}

	return false;
}

/*
 * A system can have three types of NUMA topology:
 * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
 * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
 * NUMA_BACKPLANE: nodes can reach other nodes only through a backplane
 *
 * The difference between a glueless mesh topology and a backplane
 * topology lies in whether communication between not directly
 * connected nodes goes through intermediary nodes (where programs
 * could run), or through backplane controllers. This affects
 * placement of programs.
 *
 * The type of topology can be discerned with the following tests:
 * - If the maximum distance between any nodes is 1 hop, the system
 *   is directly connected.
 * - If for two nodes A and B, located N > 1 hops away from each other,
 *   there is an intermediary node C, which is < N hops away from both
 *   nodes A and B, the system is a glueless mesh.
 */
static void init_numa_topology_type(void)
{
	int a, b, c, n;

	n = sched_max_numa_distance;

	if (sched_domains_numa_levels <= 1) {
		sched_numa_topology_type = NUMA_DIRECT;
		return;
	}

	for_each_online_node(a) {
		for_each_online_node(b) {
			/* Find two nodes furthest removed from each other: */
			if (node_distance(a, b) < n)
				continue;

			/* Is there an intermediary node between a and b? */
			for_each_online_node(c) {
				if (node_distance(a, c) < n &&
				    node_distance(b, c) < n) {
					sched_numa_topology_type =
							NUMA_GLUELESS_MESH;
					return;
				}
			}

			sched_numa_topology_type = NUMA_BACKPLANE;
			return;
		}
	}
}

void sched_init_numa(void)
{
	int next_distance, curr_distance = node_distance(0, 0);
	struct sched_domain_topology_level *tl;
	int level = 0;
	int i, j, k;

	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
	if (!sched_domains_numa_distance)
		return;

	/*
	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
	 * unique distances in the node_distance() table.
	 *
	 * Assumes node_distance(0,j) includes all distances in
	 * node_distance(i,j) in order to avoid cubic time.
	 */
	next_distance = curr_distance;
	for (i = 0; i < nr_node_ids; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			for (k = 0; k < nr_node_ids; k++) {
				int distance = node_distance(i, k);

				if (distance > curr_distance &&
				    (distance < next_distance ||
				     next_distance == curr_distance))
					next_distance = distance;

				/*
				 * While not a strong assumption it would be
				 * nice to know about cases where if node A is
				 * connected to B, B is not equally connected
				 * to A.
				 */
				if (sched_debug() && node_distance(k, i) != distance)
					sched_numa_warn("Node-distance not symmetric");

				if (sched_debug() && i && !find_numa_distance(distance))
					sched_numa_warn("Node-0 not representative");
			}
			if (next_distance != curr_distance) {
				sched_domains_numa_distance[level++] = next_distance;
				sched_domains_numa_levels = level;
				curr_distance = next_distance;
			} else break;
		}

		/*
		 * In case of sched_debug() we verify the above assumption.
		 */
		if (!sched_debug())
			break;
	}

	if (!level)
		return;

	/*
	 * 'level' contains the number of unique distances
	 *
	 * The sched_domains_numa_distance[] array includes the actual distance
	 * numbers.
	 */

	/*
	 * Keep sched_domains_numa_levels at zero until the masks below are
	 * fully allocated; sched_domains_numa_masks_set()/_clear() iterate
	 * over it from the hotplug path and must not see a half-initialized
	 * array if one of the allocations fails and we return early.
	 */
	sched_domains_numa_levels = 0;

	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
	if (!sched_domains_numa_masks)
		return;

	/*
	 * Now for each level, construct a mask per node which contains all
	 * CPUs of nodes that are that many hops away from us.
	 */
	for (i = 0; i < level; i++) {
		sched_domains_numa_masks[i] =
			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
		if (!sched_domains_numa_masks[i])
			return;

		for (j = 0; j < nr_node_ids; j++) {
			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!mask)
				return;

			sched_domains_numa_masks[i][j] = mask;

			for_each_node(k) {
				if (node_distance(j, k) > sched_domains_numa_distance[i])
					continue;

				cpumask_or(mask, mask, cpumask_of_node(k));
			}
		}
	}

	/* Compute default topology size: */
	for (i = 0; sched_domain_topology[i].mask; i++);

	tl = kzalloc((i + level + 1) *
			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
	if (!tl)
		return;

	/*
	 * Copy the default topology bits..
	 */
	for (i = 0; sched_domain_topology[i].mask; i++)
		tl[i] = sched_domain_topology[i];

	/*
	 * .. and append 'j' levels of NUMA goodness.
	 */
	for (j = 0; j < level; i++, j++) {
		tl[i] = (struct sched_domain_topology_level){
			.mask = sd_numa_mask,
			.sd_flags = cpu_numa_flags,
			.flags = SDTL_OVERLAP,
			.numa_level = j,
			SD_INIT_NAME(NUMA)
		};
	}

	sched_domain_topology = tl;

	sched_domains_numa_levels = level;
	sched_max_numa_distance = sched_domains_numa_distance[level - 1];

	init_numa_topology_type();
}
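
/*
 * CPU hotplug callback: add @cpu to every per-level/per-node NUMA mask whose
 * distance from @cpu's node is within that level's reach.
 */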
void sched_domains_numa_masks_set(unsigned int cpu)
{
	int node = cpu_to_node(cpu);
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++) {
			if (node_distance(j, node) <= sched_domains_numa_distance[i])
				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
		}
	}
}

void sched_domains_numa_masks_clear(unsigned int cpu)
{
	int i, j;

	for (i = 0; i < sched_domains_numa_levels; i++) {
		for (j = 0; j < nr_node_ids; j++)
			cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
	}
}

#endif /* CONFIG_NUMA */
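
/*
 * Allocate, for every topology level, the per-CPU sd_data arrays and the
 * sched_domain / sched_domain_shared / sched_group / sched_group_capacity
 * objects they point at, for each CPU in @cpu_map.
 */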
static int __sdt_alloc(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		sdd->sd = alloc_percpu(struct sched_domain *);
		if (!sdd->sd)
			return -ENOMEM;

		sdd->sds = alloc_percpu(struct sched_domain_shared *);
		if (!sdd->sds)
			return -ENOMEM;

		sdd->sg = alloc_percpu(struct sched_group *);
		if (!sdd->sg)
			return -ENOMEM;

		sdd->sgc = alloc_percpu(struct sched_group_capacity *);
		if (!sdd->sgc)
			return -ENOMEM;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;
			struct sched_domain_shared *sds;
			struct sched_group *sg;
			struct sched_group_capacity *sgc;

			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
					  GFP_KERNEL, cpu_to_node(j));
			if (!sd)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sd, j) = sd;

			sds = kzalloc_node(sizeof(struct sched_domain_shared),
					   GFP_KERNEL, cpu_to_node(j));
			if (!sds)
				return -ENOMEM;

			*per_cpu_ptr(sdd->sds, j) = sds;

			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
					  GFP_KERNEL, cpu_to_node(j));
			if (!sg)
				return -ENOMEM;

			sg->next = sg;

			*per_cpu_ptr(sdd->sg, j) = sg;

			sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
					   GFP_KERNEL, cpu_to_node(j));
			if (!sgc)
				return -ENOMEM;

#ifdef CONFIG_SCHED_DEBUG
			sgc->id = j;
#endif

			*per_cpu_ptr(sdd->sgc, j) = sgc;
		}
	}

	return 0;
}
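
/*
 * Free everything set up by __sdt_alloc() that was not claimed by
 * claim_allocations() for a live sched_domain hierarchy.
 */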
static void __sdt_free(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	int j;

	for_each_sd_topology(tl) {
		struct sd_data *sdd = &tl->data;

		for_each_cpu(j, cpu_map) {
			struct sched_domain *sd;

			if (sdd->sd) {
				sd = *per_cpu_ptr(sdd->sd, j);
				if (sd && (sd->flags & SD_OVERLAP))
					free_sched_groups(sd->groups, 0);
				kfree(*per_cpu_ptr(sdd->sd, j));
			}

			if (sdd->sds)
				kfree(*per_cpu_ptr(sdd->sds, j));
			if (sdd->sg)
				kfree(*per_cpu_ptr(sdd->sg, j));
			if (sdd->sgc)
				kfree(*per_cpu_ptr(sdd->sgc, j));
		}
		free_percpu(sdd->sd);
		sdd->sd = NULL;
		free_percpu(sdd->sds);
		sdd->sds = NULL;
		free_percpu(sdd->sg);
		sdd->sg = NULL;
		free_percpu(sdd->sgc);
		sdd->sgc = NULL;
	}
}
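
/*
 * Construct and initialize one sched_domain for topology level @tl on @cpu,
 * link it to @child and make sure the child's span is contained in it.
 */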
static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int cpu)
{
	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);

	if (child) {
		sd->level = child->level + 1;
		sched_domain_level_max = max(sched_domain_level_max, sd->level);
		child->parent = sd;

		if (!cpumask_subset(sched_domain_span(child),
				    sched_domain_span(sd))) {
			pr_err("BUG: arch topology borken\n");
#ifdef CONFIG_SCHED_DEBUG
			pr_err("     the %s domain not a subset of the %s domain\n",
					child->name, sd->name);
#endif
			/* Fixup, ensure @sd has at least @child CPUs. */
			cpumask_or(sched_domain_span(sd),
				   sched_domain_span(sd),
				   sched_domain_span(child));
		}

	}
	set_domain_attribute(sd, attr);

	return sd;
}

/*
 * Build sched domains for a given set of CPUs and attach the sched domains
 * to the individual CPUs
 */
static int
build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
{
	enum s_alloc alloc_state;
	struct sched_domain *sd;
	struct s_data d;
	struct rq *rq = NULL;
	int i, ret = -ENOMEM;

	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
	if (alloc_state != sa_rootdomain)
		goto error;

	/* Set up domains for CPUs specified by the cpu_map: */
	for_each_cpu(i, cpu_map) {
		struct sched_domain_topology_level *tl;

		sd = NULL;
		for_each_sd_topology(tl) {
			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
			if (tl->flags & SDTL_OVERLAP)
				sd->flags |= SD_OVERLAP;
			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
				break;
		}
	}

	/* Build the groups for the domains: */
	for_each_cpu(i, cpu_map) {
		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			sd->span_weight = cpumask_weight(sched_domain_span(sd));
			if (sd->flags & SD_OVERLAP) {
				if (build_overlap_sched_groups(sd, i))
					goto error;
			} else {
				if (build_sched_groups(sd, i))
					goto error;
			}
		}
	}

	/* Calculate CPU capacity for physical packages and nodes: */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))
			continue;

		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
			claim_allocations(i, sd);
			init_sched_groups_capacity(i, sd);
		}
	}

	/* Attach the domains: */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {
		rq = cpu_rq(i);
		sd = *per_cpu_ptr(d.sd, i);

		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);

		cpu_attach_domain(sd, d.rd, i);
	}
	rcu_read_unlock();

	if (rq && sched_debug_enabled) {
		pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
	}

	ret = 0;
error:
	__free_domain_allocs(&d, alloc_state, cpu_map);
	return ret;
}

/* Current sched domains: */
static cpumask_var_t *doms_cur;

/* Number of sched domains in 'doms_cur': */
static int ndoms_cur;

/* Attributes of custom domains in 'doms_cur': */
static struct sched_domain_attr *dattr_cur;

/*
 * Special case: If a kmalloc() of a doms_cur partition (array of
 * cpumask) fails, then fallback to a single sched domain,
 * as determined by the single cpumask fallback_doms.
 */
static cpumask_var_t fallback_doms;

/*
 * arch_update_cpu_topology lets virtualized architectures update the
 * CPU core maps. It is supposed to return 1 if the topology changed
 * or 0 if it stayed the same.
 */
int __weak arch_update_cpu_topology(void)
{
	return 0;
}

cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
{
	int i;
	cpumask_var_t *doms;

	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
			free_sched_domains(doms, i);
			return NULL;
		}
	}
	return doms;
}

void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
{
	unsigned int i;

	for (i = 0; i < ndoms; i++)
		free_cpumask_var(doms[i]);
	kfree(doms);
}

/*
 * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
 * For now this just excludes isolated CPUs, but could be used to exclude
 * other special cases in the future.
 */
int sched_init_domains(const struct cpumask *cpu_map)
{
	int err;

	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
	zalloc_cpumask_var(&sched_domains_tmpmask2, GFP_KERNEL);
	zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);

	arch_update_cpu_topology();
	ndoms_cur = 1;
	doms_cur = alloc_sched_domains(ndoms_cur);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
	err = build_sched_domains(doms_cur[0], NULL);
	register_sched_domain_sysctl();

	return err;
}

/*
 * Detach sched domains from a group of CPUs specified in cpu_map
 * These CPUs will now be attached to the NULL domain
 */
static void detach_destroy_domains(const struct cpumask *cpu_map)
{
	int i;

	rcu_read_lock();
	for_each_cpu(i, cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	rcu_read_unlock();
}

/* handle null as "default" */
static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
			struct sched_domain_attr *new, int idx_new)
{
	struct sched_domain_attr tmp;

	/* Fast path: */
	if (!new && !cur)
		return 1;

	tmp = SD_ATTR_INIT;
	return !memcmp(cur ? (cur + idx_cur) : &tmp,
		       new ? (new + idx_new) : &tmp,
		       sizeof(struct sched_domain_attr));
}

/*
 * Partition sched domains as specified by the 'ndoms_new'
 * cpumasks in the array doms_new[] of cpumasks. This compares
 * doms_new[] to the current sched domain partitioning, doms_cur[].
 * It destroys each deleted domain and builds each new domain.
 *
 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
 * The masks don't intersect (don't overlap.) We should setup one
 * sched domain for each mask. CPUs not in any of the cpumasks will
 * not be load balanced. If the same cpumask appears both in the
 * current 'doms_cur' domains and in the new 'doms_new', we can leave
 * it as it is.
 *
 * The passed in 'doms_new' should be allocated using
 * alloc_sched_domains.  This routine takes ownership of it and will
 * free_sched_domains it when done with it. If the caller failed the
 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 * and partition_sched_domains() will fall back to the single partition
 * 'fallback_doms'; it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
 * and it will not create the default domain.
 *
 * Call with hotplug lock held.
 */
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			     struct sched_domain_attr *dattr_new)
{
	int i, j, n;
	int new_topology;

	mutex_lock(&sched_domains_mutex);

	/* Always unregister in case we don't destroy any domains: */
	unregister_sched_domain_sysctl();

	/* Let the architecture update CPU core mappings: */
	new_topology = arch_update_cpu_topology();

	if (!doms_new) {
		WARN_ON_ONCE(dattr_new);
		n = 0;
		doms_new = alloc_sched_domains(1);
		if (doms_new) {
			n = 1;
			cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
		}
	} else {
		n = ndoms_new;
	}

	/* Destroy deleted domains: */
	for (i = 0; i < ndoms_cur; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_cur[i], doms_new[j])
			    && dattrs_equal(dattr_cur, i, dattr_new, j))
				goto match1;
		}
		/* No match - a current sched domain not in new doms_new[] */
		detach_destroy_domains(doms_cur[i]);
match1:
		;
	}

	n = ndoms_cur;
	if (!doms_new) {
		n = 0;
		doms_new = &fallback_doms;
		cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
	}

	/* Build new domains: */
	for (i = 0; i < ndoms_new; i++) {
		for (j = 0; j < n && !new_topology; j++) {
			if (cpumask_equal(doms_new[i], doms_cur[j])
			    && dattrs_equal(dattr_new, i, dattr_cur, j))
				goto match2;
		}
		/* No match - add a new doms_new */
		build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
match2:
		;
	}

	/* Remember the new sched domains: */
	if (doms_cur != &fallback_doms)
		free_sched_domains(doms_cur, ndoms_cur);

	kfree(dattr_cur);
	doms_cur = doms_new;
	dattr_cur = dattr_new;
	ndoms_cur = ndoms_new;

	register_sched_domain_sysctl();

	mutex_unlock(&sched_domains_mutex);
}