/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

#define SD_BALANCE_NEWIDLE	0x0001	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0002	/* Balance on exec */
#define SD_BALANCE_FORK		0x0004	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0008	/* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0010	/* Wake task to waking CPU */
#define SD_ASYM_CPUCAPACITY	0x0020	/* Domain members have different CPU capacities */
#define SD_SHARE_CPUCAPACITY	0x0040	/* Domain members share CPU capacity */
#define SD_SHARE_POWERDOMAIN	0x0080	/* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES	0x0100	/* Domain members share CPU pkg resources */
#define SD_SERIALIZE		0x0200	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0400	/* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x0800	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x1000	/* sched_domains of this level overlap */
#define SD_NUMA			0x2000	/* cross-node balancing */

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif
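
/*
 * Note: the cpu_*_flags() helpers above are the sd_flags callbacks wired
 * into the scheduler's default topology table (SMT and MC levels) and the
 * NUMA levels built at boot; see struct sched_domain_topology_level below.
 */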

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
};
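
/*
 * One sched_domain_shared instance is reference-counted per domain span:
 * has_idle_cores backs the idle-core search on wakeup, and nr_busy_cpus
 * feeds the nohz balancing heuristics.
 */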

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;		/* the balancing groups of the domain */
	unsigned long min_interval;		/* Minimum balance interval ms */
	unsigned long max_interval;		/* Maximum balance interval ms */
	unsigned int busy_factor;		/* less balancing by factor if busy */
	unsigned int imbalance_pct;		/* No balance until over watermark */
	unsigned int cache_nice_tries;		/* Leave cache hot tasks for # tries */

	int nohz_idle;				/* NOHZ IDLE status */
	int flags;				/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;		/* init to jiffies. units in jiffies */
	unsigned int balance_interval;		/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed;		/* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

	u64 avg_scan_cost;			/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;

	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
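
/*
 * Illustrative use (a sketch, not from this file): walking the CPUs
 * covered by a domain; do_something() is a placeholder.
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		do_something(cpu);
 */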

extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP		0x01

struct sd_data {
	struct sched_domain *__percpu		*sd;
	struct sched_domain_shared *__percpu	*sds;
	struct sched_group *__percpu		*sg;
	struct sched_group_capacity *__percpu	*sgc;
};
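
/*
 * sd_data holds the per-CPU building blocks (domains, shared state,
 * groups and group capacities) allocated for one topology level while
 * the domain hierarchy is built.
 */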

struct sched_domain_topology_level {
	sched_domain_mask_f	mask;
	sched_domain_flags_f	sd_flags;
	int			flags;
	int			numa_level;
	struct sd_data		data;
#ifdef CONFIG_SCHED_DEBUG
	char			*name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);
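
/*
 * Example (hypothetical): an architecture can replace the default
 * topology table by passing its own array, terminated by an entry with
 * a NULL mask:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 *
 * my_topology is a placeholder name; cpu_smt_mask and cpu_cpu_mask are
 * the per-CPU mask helpers from <linux/topology.h>.
 */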

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *         max(max_perf(c) for all c)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
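
/*
 * Example (hypothetical): an asymmetric platform would override the
 * fallback above from its arch topology headers, e.g.:
 *
 *	#define arch_scale_cpu_capacity my_cpu_capacity
 *	static __always_inline unsigned long my_cpu_capacity(int cpu)
 *	{
 *		return per_cpu(my_cpu_scale, cpu);
 *	}
 *
 * my_cpu_capacity and my_cpu_scale are placeholder names.
 */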

#ifndef arch_scale_thermal_pressure
static __always_inline
unsigned long arch_scale_thermal_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_set_thermal_pressure
static __always_inline
void arch_set_thermal_pressure(const struct cpumask *cpus,
			       unsigned long th_pressure)
{ }
#endif
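
/*
 * arch_set_thermal_pressure() lets the platform publish, for a set of
 * CPUs, how much capacity is currently lost to thermal capping, and
 * arch_scale_thermal_pressure() is how the scheduler reads it back.
 * The stubs above mean "no thermal pressure" unless the architecture
 * overrides them.
 */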

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */