1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/sched.h>
24#include <linux/cpumask.h>
25#include <linux/cpuset.h>
26#include <linux/mutex.h>
27#include <linux/sysctl.h>
28#include <linux/nodemask.h>
29
/* Serializes updates to ITMT capability/enable state and sysctl teardown. */
static DEFINE_MUTEX(itmt_update_mutex);
/* Per-CPU scheduling priority used by asym packing; read by arch_asym_cpu_priority(). */
DEFINE_PER_CPU_READ_MOSTLY(int, sched_core_priority);


/* True once the platform has announced ITMT support (set/cleared under itmt_update_mutex). */
static bool __read_mostly sched_itmt_capable;




/*
 * User-visible on/off knob, exposed as /proc/sys/kernel/sched_itmt_enabled.
 * Changes trigger a sched-domain rebuild via sched_itmt_update_handler().
 */
unsigned int __read_mostly sysctl_sched_itmt_enabled;
44
45static int sched_itmt_update_handler(struct ctl_table *table, int write,
46 void __user *buffer, size_t *lenp,
47 loff_t *ppos)
48{
49 unsigned int old_sysctl;
50 int ret;
51
52 mutex_lock(&itmt_update_mutex);
53
54 if (!sched_itmt_capable) {
55 mutex_unlock(&itmt_update_mutex);
56 return -EINVAL;
57 }
58
59 old_sysctl = sysctl_sched_itmt_enabled;
60 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
61
62 if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled) {
63 x86_topology_update = true;
64 rebuild_sched_domains();
65 }
66
67 mutex_unlock(&itmt_update_mutex);
68
69 return ret;
70}
71
/* Bounds for the sysctl below: valid values are 0 and 1 only. */
static unsigned int zero;
static unsigned int one = 1;
/* kernel.sched_itmt_enabled: boolean knob, all accesses go through the handler. */
static struct ctl_table itmt_kern_table[] = {
	{
		.procname	= "sched_itmt_enabled",
		.data		= &sysctl_sched_itmt_enabled,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_itmt_update_handler,
		.extra1		= &zero,
		.extra2		= &one,
	},
	{}
};
86
/* Parent "kernel" directory entry so the knob lands in /proc/sys/kernel/. */
static struct ctl_table itmt_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= itmt_kern_table,
	},
	{}
};

/* Handle for unregistering the sysctl when ITMT support is cleared; NULL when unregistered. */
static struct ctl_table_header *itmt_sysctl_header;
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115int sched_set_itmt_support(void)
116{
117 mutex_lock(&itmt_update_mutex);
118
119 if (sched_itmt_capable) {
120 mutex_unlock(&itmt_update_mutex);
121 return 0;
122 }
123
124 itmt_sysctl_header = register_sysctl_table(itmt_root_table);
125 if (!itmt_sysctl_header) {
126 mutex_unlock(&itmt_update_mutex);
127 return -ENOMEM;
128 }
129
130 sched_itmt_capable = true;
131
132 sysctl_sched_itmt_enabled = 1;
133
134 x86_topology_update = true;
135 rebuild_sched_domains();
136
137 mutex_unlock(&itmt_update_mutex);
138
139 return 0;
140}
141
142
143
144
145
146
147
148
149
150
151
152void sched_clear_itmt_support(void)
153{
154 mutex_lock(&itmt_update_mutex);
155
156 if (!sched_itmt_capable) {
157 mutex_unlock(&itmt_update_mutex);
158 return;
159 }
160 sched_itmt_capable = false;
161
162 if (itmt_sysctl_header) {
163 unregister_sysctl_table(itmt_sysctl_header);
164 itmt_sysctl_header = NULL;
165 }
166
167 if (sysctl_sched_itmt_enabled) {
168
169 sysctl_sched_itmt_enabled = 0;
170 x86_topology_update = true;
171 rebuild_sched_domains();
172 }
173
174 mutex_unlock(&itmt_update_mutex);
175}
176
/* Scheduler hook: report this CPU's ITMT priority for asym-packing decisions. */
int arch_asym_cpu_priority(int cpu)
{
	return per_cpu(sched_core_priority, cpu);
}
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196void sched_set_itmt_core_prio(int prio, int core_cpu)
197{
198 int cpu, i = 1;
199
200 for_each_cpu(cpu, topology_sibling_cpumask(core_cpu)) {
201 int smt_prio;
202
203
204
205
206
207
208 smt_prio = prio * smp_num_siblings / i;
209 per_cpu(sched_core_priority, cpu) = smt_prio;
210 i++;
211 }
212}
213