/*
 * itmt.c: Support Intel Turbo Boost Max Technology 3.0
 *
 * On platforms supporting Intel Turbo Boost Max Technology 3.0 (ITMT),
 * the maximum turbo frequencies of some cores in a CPU package may be
 * higher than for the other cores in the same package.  In that case,
 * better performance can be achieved by making the scheduler prefer
 * to run tasks on the CPUs with the higher max turbo frequencies.
 *
 * This file provides functions and data structures for enabling the
 * scheduler to favor scheduling on cores that can be boosted to a
 * higher frequency under ITMT.
 */
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/sysctl.h>
#include <linux/nodemask.h>

static DEFINE_MUTEX(itmt_update_mutex);
DEFINE_PER_CPU_READ_MOSTLY(int, sched_core_priority);

/* Boolean to track whether the platform has ITMT capabilities. */
static bool __read_mostly sched_itmt_capable;

/*
 * Controls whether the scheduler moves tasks to CPUs capable of a
 * higher turbo frequency on ITMT-capable platforms.
 *
 * Writable via /proc/sys/kernel/sched_itmt_enabled once the sysctl
 * has been registered by sched_set_itmt_support().
 */
unsigned int __read_mostly sysctl_sched_itmt_enabled;

/*
 * Handler for /proc/sys/kernel/sched_itmt_enabled: rebuild the sched
 * domains only when the written value actually changes the setting.
 */
static int sched_itmt_update_handler(struct ctl_table *table, int write,
				     void __user *buffer, size_t *lenp,
				     loff_t *ppos)
{
	unsigned int old_sysctl;
	int ret;

	mutex_lock(&itmt_update_mutex);

	if (!sched_itmt_capable) {
		mutex_unlock(&itmt_update_mutex);
		return -EINVAL;
	}

	old_sysctl = sysctl_sched_itmt_enabled;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled) {
		x86_topology_update = true;
		rebuild_sched_domains();
	}

	mutex_unlock(&itmt_update_mutex);

	return ret;
}

static struct ctl_table itmt_kern_table[] = {
	{
		.procname	= "sched_itmt_enabled",
		.data		= &sysctl_sched_itmt_enabled,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_itmt_update_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static struct ctl_table itmt_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= itmt_kern_table,
	},
	{}
};

static struct ctl_table_header *itmt_sysctl_header;
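
/*
 * Note: registering itmt_root_table exposes the knob as
 * /proc/sys/kernel/sched_itmt_enabled.  For example, from a root shell:
 *
 *	echo 0 > /proc/sys/kernel/sched_itmt_enabled
 *
 * goes through sched_itmt_update_handler() above and, because the
 * value changed, triggers a sched-domain rebuild.
 */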

/**
 * sched_set_itmt_support() - Indicate platform supports ITMT
 *
 * This function is used by the OS to indicate to the scheduler that
 * the platform is capable of supporting the ITMT feature.
 *
 * The current scheme has the pstate driver detect whether the system
 * is ITMT capable and then call sched_set_itmt_support().
 *
 * This must be done only after sched_set_itmt_core_prio() has been
 * called to set the CPUs' priorities.
 *
 * It must not be called with the CPU hotplug lock held, since that
 * lock is needed to rebuild the sched domains.
 *
 * Return: 0 on success
 */
int sched_set_itmt_support(void)
{
	mutex_lock(&itmt_update_mutex);

	/* Nothing to do if ITMT support was already declared. */
	if (sched_itmt_capable) {
		mutex_unlock(&itmt_update_mutex);
		return 0;
	}

	itmt_sysctl_header = register_sysctl_table(itmt_root_table);
	if (!itmt_sysctl_header) {
		mutex_unlock(&itmt_update_mutex);
		return -ENOMEM;
	}

	sched_itmt_capable = true;

	/* Declaring ITMT support also enables ITMT scheduling by default. */
	sysctl_sched_itmt_enabled = 1;

	x86_topology_update = true;
	rebuild_sched_domains();

	mutex_unlock(&itmt_update_mutex);

	return 0;
}
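
/*
 * A minimal usage sketch (not part of this file): the expected caller
 * is a CPU frequency driver such as intel_pstate, which reads each
 * core's max boost metric, sets the per-core priorities, and only then
 * declares ITMT support.  max_boost_of() is a hypothetical stand-in
 * for however the driver obtains that metric:
 *
 *	for_each_online_cpu(cpu)
 *		sched_set_itmt_core_prio(max_boost_of(cpu), cpu);
 *	sched_set_itmt_support();
 */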

/**
 * sched_clear_itmt_support() - Revoke platform's support of ITMT
 *
 * This function is used by the OS to indicate that it has
 * revoked the platform's support of the ITMT feature.
 *
 * It must not be called with the CPU hotplug lock held, since that
 * lock is needed to rebuild the sched domains.
 */
void sched_clear_itmt_support(void)
{
	mutex_lock(&itmt_update_mutex);

	if (!sched_itmt_capable) {
		mutex_unlock(&itmt_update_mutex);
		return;
	}
	sched_itmt_capable = false;

	if (itmt_sysctl_header) {
		unregister_sysctl_table(itmt_sysctl_header);
		itmt_sysctl_header = NULL;
	}

	if (sysctl_sched_itmt_enabled) {
		/* Disable sched_itmt if we are no longer ITMT capable. */
		sysctl_sched_itmt_enabled = 0;
		x86_topology_update = true;
		rebuild_sched_domains();
	}

	mutex_unlock(&itmt_update_mutex);
}

/*
 * Used by the scheduler's asym-packing logic to compare CPU priorities;
 * this overrides the weak default definition in kernel/sched.
 */
int arch_asym_cpu_priority(int cpu)
{
	return per_cpu(sched_core_priority, cpu);
}

/**
 * sched_set_itmt_core_prio() - Set CPU priority based on ITMT
 * @prio:	Priority of the CPU core
 * @core_cpu:	The CPU number associated with the core
 *
 * The pstate driver will find out the max boost frequency
 * and call this function to set a priority proportional
 * to the max boost frequency. A CPU with a higher boost
 * frequency will receive a higher priority.
 *
 * No CPU hotplug lock or cpuset lock is needed while
 * calling this function.
 */
void sched_set_itmt_core_prio(int prio, int core_cpu)
{
	int cpu, i = 1;

	for_each_cpu(cpu, topology_sibling_cpumask(core_cpu)) {
		int smt_prio;

		/*
		 * Ensure that the siblings are moved to the end
		 * of the priority chain and only used when
		 * all other high priority CPUs are out of capacity.
		 */
		smt_prio = prio * smp_num_siblings / i;
		per_cpu(sched_core_priority, cpu) = smt_prio;
		i++;
	}
}
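
/*
 * Worked example of the priority formula above: with smp_num_siblings
 * == 2 and prio == 100, the first sibling enumerated gets
 * 100 * 2 / 1 = 200 and the second gets 100 * 2 / 2 = 100.  SMT
 * siblings therefore rank behind the first thread of every core with
 * the same boost priority, so they are picked only once those threads
 * are out of capacity.
 */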