1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38#define pr_fmt(fmt) "ii_dev: " fmt
39
40#include <linux/cpu.h>
41#include <linux/hrtimer.h>
42#include <linux/kthread.h>
43#include <linux/sched.h>
44#include <linux/slab.h>
45#include <linux/smpboot.h>
46#include <linux/idle_inject.h>
47
48#include <uapi/linux/sched/types.h>
49
50
51
52
53
54
/**
 * struct idle_inject_thread - per-CPU idle injection thread state
 * @tsk: the task injecting the idle cycles on this CPU
 * @should_run: nonzero when the thread has injection work pending; read by
 *              the smpboot thread_should_run() callback and set/cleared by
 *              the wakeup and injection paths
 */
struct idle_inject_thread {
	struct task_struct *tsk;
	int should_run;
};
59
60
61
62
63
64
65
66
67
/**
 * struct idle_inject_device - idle injection control device
 * @timer: hrtimer that periodically triggers a new injection cycle
 * @idle_duration_us: duration of CPU idle time to inject, in microseconds
 * @run_duration_us: duration of CPU run time between injections, in microseconds
 * @latency_us: maximum exit latency passed to play_idle_precise()
 * @cpumask: flexible-array storage for the mask of CPUs affected by this
 *           device (accessed via to_cpumask())
 */
struct idle_inject_device {
	struct hrtimer timer;
	unsigned int idle_duration_us;
	unsigned int run_duration_us;
	unsigned int latency_us;
	unsigned long cpumask[];
};
75
/* Per-CPU injection kthread state. */
static DEFINE_PER_CPU(struct idle_inject_thread, idle_inject_thread);
/* Control device currently governing each CPU; NULL while unregistered. */
static DEFINE_PER_CPU(struct idle_inject_device *, idle_inject_device);
78
79
80
81
82
83
84
85
86static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
87{
88 struct idle_inject_thread *iit;
89 unsigned int cpu;
90
91 for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) {
92 iit = per_cpu_ptr(&idle_inject_thread, cpu);
93 iit->should_run = 1;
94 wake_up_process(iit->tsk);
95 }
96}
97
98
99
100
101
102
103
104
105
106
107
/**
 * idle_inject_timer_fn - idle injection timer function
 * @timer: idle injection hrtimer embedded in the control device
 *
 * Called when the injection timer expires: wakes up the idle injection
 * kthreads associated with the timer's control device so they inject idle
 * time, then re-arms the timer one full period (run time + idle time)
 * ahead of now.
 *
 * Return: HRTIMER_RESTART, so the timer keeps firing periodically.
 */
static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	unsigned int duration_us;
	struct idle_inject_device *ii_dev =
		container_of(timer, struct idle_inject_device, timer);

	/* One period is the run time plus the idle time. */
	duration_us = READ_ONCE(ii_dev->run_duration_us);
	duration_us += READ_ONCE(ii_dev->idle_duration_us);

	idle_inject_wakeup(ii_dev);

	hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC));

	return HRTIMER_RESTART;
}
123
124
125
126
127
128
129
130
/**
 * idle_inject_fn - idle injection work function
 * @cpu: the CPU owning the kthread
 *
 * Invoked by the smpboot loop when should_run is set; calls
 * play_idle_precise() to inject the configured amount of CPU idle time.
 */
static void idle_inject_fn(unsigned int cpu)
{
	struct idle_inject_device *ii_dev;
	struct idle_inject_thread *iit;

	ii_dev = per_cpu(idle_inject_device, cpu);
	iit = per_cpu_ptr(&idle_inject_thread, cpu);

	/*
	 * Clear should_run before idling so idle_inject_should_run() returns
	 * 0 once this injection completes; the next run is triggered by the
	 * periodic timer setting the flag again via idle_inject_wakeup().
	 */
	iit->should_run = 0;

	play_idle_precise(READ_ONCE(ii_dev->idle_duration_us) * NSEC_PER_USEC,
			  READ_ONCE(ii_dev->latency_us) * NSEC_PER_USEC);
}
147
148
149
150
151
152
153void idle_inject_set_duration(struct idle_inject_device *ii_dev,
154 unsigned int run_duration_us,
155 unsigned int idle_duration_us)
156{
157 if (run_duration_us && idle_duration_us) {
158 WRITE_ONCE(ii_dev->run_duration_us, run_duration_us);
159 WRITE_ONCE(ii_dev->idle_duration_us, idle_duration_us);
160 }
161}
162
163
164
165
166
167
168void idle_inject_get_duration(struct idle_inject_device *ii_dev,
169 unsigned int *run_duration_us,
170 unsigned int *idle_duration_us)
171{
172 *run_duration_us = READ_ONCE(ii_dev->run_duration_us);
173 *idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
174}
175
176
177
178
179
/**
 * idle_inject_set_latency - set the maximum latency allowed
 * @ii_dev: idle injection control device structure
 * @latency_us: maximum exit latency, in microseconds, forwarded to
 *              play_idle_precise() on subsequent injections
 */
void idle_inject_set_latency(struct idle_inject_device *ii_dev,
			     unsigned int latency_us)
{
	WRITE_ONCE(ii_dev->latency_us, latency_us);
}
185
186
187
188
189
190
191
192
193
194
195
/**
 * idle_inject_start - start idle injection
 * @ii_dev: idle injection control device structure
 *
 * Starts idle injection by first waking up all of the idle injection
 * kthreads associated with @ii_dev so they inject CPU idle time right away,
 * and then arming the timer that triggers the subsequent injection periods.
 *
 * Return: -EINVAL if either the idle or the run duration is zero (i.e.
 * idle_inject_set_duration() was never called with valid values), 0 on
 * success.
 */
int idle_inject_start(struct idle_inject_device *ii_dev)
{
	unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
	unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);

	if (!idle_duration_us || !run_duration_us)
		return -EINVAL;

	pr_debug("Starting injecting idle cycles on CPUs '%*pbl'\n",
		 cpumask_pr_args(to_cpumask(ii_dev->cpumask)));

	/* Inject one cycle immediately; the timer drives the rest. */
	idle_inject_wakeup(ii_dev);

	hrtimer_start(&ii_dev->timer,
		      ns_to_ktime((idle_duration_us + run_duration_us) *
				  NSEC_PER_USEC),
		      HRTIMER_MODE_REL);

	return 0;
}
216
217
218
219
220
221
222
223
224
225
226
227
/**
 * idle_inject_stop - stop idle injection
 * @ii_dev: idle injection control device structure
 *
 * Stops idle injection: the periodic timer is cancelled, then each affected
 * per-CPU thread has its should_run flag cleared and is waited for until it
 * is inactive, so no idle time is injected on behalf of @ii_dev after this
 * function returns.
 */
void idle_inject_stop(struct idle_inject_device *ii_dev)
{
	struct idle_inject_thread *iit;
	unsigned int cpu;

	pr_debug("Stopping idle injection on CPUs '%*pbl'\n",
		 cpumask_pr_args(to_cpumask(ii_dev->cpumask)));

	/* No new wakeups once the timer is cancelled. */
	hrtimer_cancel(&ii_dev->timer);

	/*
	 * NOTE(review): hotplug is disabled around the loop below, presumably
	 * so that CPUs in the mask cannot go on/offline (and have their
	 * smpboot kthreads parked/unparked) while should_run is being cleared
	 * and the tasks are waited for — confirm against smpboot parking
	 * semantics.
	 */
	cpu_hotplug_disable();

	/*
	 * Clear each thread's "work pending" flag and wait until the task is
	 * actually inactive, so no injection is in flight when we return.
	 */
	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
		iit = per_cpu_ptr(&idle_inject_thread, cpu);
		iit->should_run = 0;

		wait_task_inactive(iit->tsk, 0);
	}

	cpu_hotplug_enable();
}
262
263
264
265
266
267
268
269
/**
 * idle_inject_setup - prepare the current task for idle injection
 * @cpu: not used
 *
 * smpboot setup callback, invoked once when the per-CPU kthread is created;
 * makes the current task a SCHED_FIFO (real-time) task.
 */
static void idle_inject_setup(unsigned int cpu)
{
	sched_set_fifo(current);
}
274
275
276
277
278
279
280
281static int idle_inject_should_run(unsigned int cpu)
282{
283 struct idle_inject_thread *iit =
284 per_cpu_ptr(&idle_inject_thread, cpu);
285
286 return iit->should_run;
287}
288
289
290
291
292
293
294
295
296
297
298
299
/**
 * idle_inject_register - initialize idle injection on a set of CPUs
 * @cpumask: CPUs to be affected by idle injection
 *
 * Creates an idle injection control device structure for the given set of
 * CPUs, initializes its timer and claims the per-CPU device slot of every
 * CPU in the mask.  No injection cycles are started.
 *
 * Return: NULL if memory allocation fails or if any CPU in @cpumask already
 * has an idle injection device registered (in which case all slot claims
 * made here are rolled back); otherwise a pointer to the new device.
 */
struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
{
	struct idle_inject_device *ii_dev;
	int cpu, cpu_rb;

	/* Single allocation covers the struct plus its trailing cpumask. */
	ii_dev = kzalloc(sizeof(*ii_dev) + cpumask_size(), GFP_KERNEL);
	if (!ii_dev)
		return NULL;

	cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
	hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ii_dev->timer.function = idle_inject_timer_fn;
	/* Effectively no latency constraint until set_latency() is called. */
	ii_dev->latency_us = UINT_MAX;

	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
		/* Only one device may control a given CPU at a time. */
		if (per_cpu(idle_inject_device, cpu)) {
			pr_err("cpu%d is already registered\n", cpu);
			goto out_rollback;
		}

		per_cpu(idle_inject_device, cpu) = ii_dev;
	}

	return ii_dev;

out_rollback:
	/* 'cpu' still holds the conflicting CPU; undo only earlier claims. */
	for_each_cpu(cpu_rb, to_cpumask(ii_dev->cpumask)) {
		if (cpu == cpu_rb)
			break;
		per_cpu(idle_inject_device, cpu_rb) = NULL;
	}

	kfree(ii_dev);

	return NULL;
}
337
338
339
340
341
342
343
344
345
/**
 * idle_inject_unregister - unregister an idle injection control device
 * @ii_dev: idle injection control device to unregister
 *
 * Stops idle injection for the given control device, releases the per-CPU
 * device slots claimed at registration time and frees the device's memory.
 */
void idle_inject_unregister(struct idle_inject_device *ii_dev)
{
	/* Make sure no injection is in flight before tearing down. */
	idle_inject_stop(ii_dev);

	unsigned int cpu;

	for_each_cpu(cpu, to_cpumask(ii_dev->cpumask))
		per_cpu(idle_inject_device, cpu) = NULL;

	kfree(ii_dev);
}
357
/* smpboot descriptor for the per-CPU idle injection kthreads. */
static struct smp_hotplug_thread idle_inject_threads = {
	.store = &idle_inject_thread.tsk,
	.setup = idle_inject_setup,
	.thread_fn = idle_inject_fn,
	.thread_comm = "idle_inject/%u",
	.thread_should_run = idle_inject_should_run,
};
365
/* Create the per-CPU idle injection kthreads early during boot. */
static int __init idle_inject_init(void)
{
	return smpboot_register_percpu_thread(&idle_inject_threads);
}
early_initcall(idle_inject_init);
371