1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/sched.h>
20#include <linux/cpumask.h>
21#include <linux/cpuset.h>
22#include <linux/mutex.h>
23#include <linux/sysctl.h>
24#include <linux/nodemask.h>
25
/*
 * Serializes all ITMT state transitions: the capability flag, the
 * sysctl enable knob, and registration/unregistration of the sysctl
 * table. Every reader/writer below takes this mutex.
 */
static DEFINE_MUTEX(itmt_update_mutex);

/* Per-CPU scheduling priority exposed through arch_asym_cpu_priority(). */
DEFINE_PER_CPU_READ_MOSTLY(int, sched_core_priority);

/*
 * True once sched_set_itmt_support() has run (platform reported ITMT
 * support); cleared again by sched_clear_itmt_support(). Only written
 * under itmt_update_mutex.
 */
static bool __read_mostly sched_itmt_capable;

/*
 * User-visible on/off switch, published as the
 * /proc/sys/kernel/sched_itmt_enabled sysctl (see itmt_kern_table).
 * Set to 1 automatically when ITMT support is first established.
 */
unsigned int __read_mostly sysctl_sched_itmt_enabled;
40
41static int sched_itmt_update_handler(struct ctl_table *table, int write,
42 void __user *buffer, size_t *lenp,
43 loff_t *ppos)
44{
45 unsigned int old_sysctl;
46 int ret;
47
48 mutex_lock(&itmt_update_mutex);
49
50 if (!sched_itmt_capable) {
51 mutex_unlock(&itmt_update_mutex);
52 return -EINVAL;
53 }
54
55 old_sysctl = sysctl_sched_itmt_enabled;
56 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
57
58 if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled) {
59 x86_topology_update = true;
60 rebuild_sched_domains();
61 }
62
63 mutex_unlock(&itmt_update_mutex);
64
65 return ret;
66}
67
/* Bounds for the boolean sysctl below (proc_dointvec_minmax clamps to [0,1]). */
static unsigned int zero;
static unsigned int one = 1;

/* kernel.sched_itmt_enabled: runtime on/off switch for ITMT scheduling. */
static struct ctl_table itmt_kern_table[] = {
	{
		.procname	= "sched_itmt_enabled",
		.data		= &sysctl_sched_itmt_enabled,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_itmt_update_handler,
		.extra1		= &zero,
		.extra2		= &one,
	},
	{}	/* sentinel */
};
82
/* Parent directory entry: places the table above under /proc/sys/kernel. */
static struct ctl_table itmt_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= itmt_kern_table,
	},
	{}	/* sentinel */
};

/*
 * Handle returned by register_sysctl_table(); non-NULL exactly while
 * ITMT support is registered. Protected by itmt_update_mutex.
 */
static struct ctl_table_header *itmt_sysctl_header;
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111int sched_set_itmt_support(void)
112{
113 mutex_lock(&itmt_update_mutex);
114
115 if (sched_itmt_capable) {
116 mutex_unlock(&itmt_update_mutex);
117 return 0;
118 }
119
120 itmt_sysctl_header = register_sysctl_table(itmt_root_table);
121 if (!itmt_sysctl_header) {
122 mutex_unlock(&itmt_update_mutex);
123 return -ENOMEM;
124 }
125
126 sched_itmt_capable = true;
127
128 sysctl_sched_itmt_enabled = 1;
129
130 x86_topology_update = true;
131 rebuild_sched_domains();
132
133 mutex_unlock(&itmt_update_mutex);
134
135 return 0;
136}
137
138
139
140
141
142
143
144
145
146
147
148void sched_clear_itmt_support(void)
149{
150 mutex_lock(&itmt_update_mutex);
151
152 if (!sched_itmt_capable) {
153 mutex_unlock(&itmt_update_mutex);
154 return;
155 }
156 sched_itmt_capable = false;
157
158 if (itmt_sysctl_header) {
159 unregister_sysctl_table(itmt_sysctl_header);
160 itmt_sysctl_header = NULL;
161 }
162
163 if (sysctl_sched_itmt_enabled) {
164
165 sysctl_sched_itmt_enabled = 0;
166 x86_topology_update = true;
167 rebuild_sched_domains();
168 }
169
170 mutex_unlock(&itmt_update_mutex);
171}
172
173int arch_asym_cpu_priority(int cpu)
174{
175 return per_cpu(sched_core_priority, cpu);
176}
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192void sched_set_itmt_core_prio(int prio, int core_cpu)
193{
194 int cpu, i = 1;
195
196 for_each_cpu(cpu, topology_sibling_cpumask(core_cpu)) {
197 int smt_prio;
198
199
200
201
202
203
204 smt_prio = prio * smp_num_siblings / i;
205 per_cpu(sched_core_priority, cpu) = smt_prio;
206 i++;
207 }
208}
209