1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/sched.h>
20#include <linux/cpumask.h>
21#include <linux/cpuset.h>
22#include <linux/mutex.h>
23#include <linux/sysctl.h>
24#include <linux/nodemask.h>
25
26static DEFINE_MUTEX(itmt_update_mutex);
27DEFINE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
28
29
30static bool __read_mostly sched_itmt_capable;
31
32
33
34
35
36
37
38
39unsigned int __read_mostly sysctl_sched_itmt_enabled;
40
41static int sched_itmt_update_handler(struct ctl_table *table, int write,
42 void *buffer, size_t *lenp, loff_t *ppos)
43{
44 unsigned int old_sysctl;
45 int ret;
46
47 mutex_lock(&itmt_update_mutex);
48
49 if (!sched_itmt_capable) {
50 mutex_unlock(&itmt_update_mutex);
51 return -EINVAL;
52 }
53
54 old_sysctl = sysctl_sched_itmt_enabled;
55 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
56
57 if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled) {
58 x86_topology_update = true;
59 rebuild_sched_domains();
60 }
61
62 mutex_unlock(&itmt_update_mutex);
63
64 return ret;
65}
66
/* kernel.sched_itmt_enabled: 0/1 knob, range-clamped via extra1/extra2. */
static struct ctl_table itmt_kern_table[] = {
	{
		.procname = "sched_itmt_enabled",
		.data = &sysctl_sched_itmt_enabled,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_itmt_update_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{}
};
79
/* Parent directory entry placing the knob under /proc/sys/kernel/. */
static struct ctl_table itmt_root_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = itmt_kern_table,
	},
	{}
};
88
/* Handle for unregistering the ITMT sysctl; NULL when not registered. */
static struct ctl_table_header *itmt_sysctl_header;
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108int sched_set_itmt_support(void)
109{
110 mutex_lock(&itmt_update_mutex);
111
112 if (sched_itmt_capable) {
113 mutex_unlock(&itmt_update_mutex);
114 return 0;
115 }
116
117 itmt_sysctl_header = register_sysctl_table(itmt_root_table);
118 if (!itmt_sysctl_header) {
119 mutex_unlock(&itmt_update_mutex);
120 return -ENOMEM;
121 }
122
123 sched_itmt_capable = true;
124
125 sysctl_sched_itmt_enabled = 1;
126
127 x86_topology_update = true;
128 rebuild_sched_domains();
129
130 mutex_unlock(&itmt_update_mutex);
131
132 return 0;
133}
134
135
136
137
138
139
140
141
142
143
144
145void sched_clear_itmt_support(void)
146{
147 mutex_lock(&itmt_update_mutex);
148
149 if (!sched_itmt_capable) {
150 mutex_unlock(&itmt_update_mutex);
151 return;
152 }
153 sched_itmt_capable = false;
154
155 if (itmt_sysctl_header) {
156 unregister_sysctl_table(itmt_sysctl_header);
157 itmt_sysctl_header = NULL;
158 }
159
160 if (sysctl_sched_itmt_enabled) {
161
162 sysctl_sched_itmt_enabled = 0;
163 x86_topology_update = true;
164 rebuild_sched_domains();
165 }
166
167 mutex_unlock(&itmt_update_mutex);
168}
169
170int arch_asym_cpu_priority(int cpu)
171{
172 return per_cpu(sched_core_priority, cpu);
173}
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
/**
 * sched_set_itmt_core_prio() - record ITMT priority for a core
 * @prio:     priority value for the core containing @core_cpu
 * @core_cpu: any CPU number belonging to that core
 *
 * Stores a per-CPU priority for every SMT sibling of @core_cpu in
 * sched_core_priority, which arch_asym_cpu_priority() later reports
 * to the scheduler.
 */
void sched_set_itmt_core_prio(int prio, int core_cpu)
{
	int cpu, i = 1;

	for_each_cpu(cpu, topology_sibling_cpumask(core_cpu)) {
		int smt_prio;

		/*
		 * Scale the priority down for each successive sibling:
		 * i counts siblings starting at 1, so the first sibling
		 * visited keeps prio * smp_num_siblings and later ones
		 * get progressively smaller values. Presumably this
		 * pushes SMT siblings to the back of the priority chain
		 * so they are used only when higher-priority CPUs are
		 * out of capacity — NOTE(review): the order in which
		 * for_each_cpu() visits siblings determines which one
		 * keeps the top value; confirm against topology code.
		 */
		smt_prio = prio * smp_num_siblings / i;
		per_cpu(sched_core_priority, cpu) = smt_prio;
		i++;
	}
}
206