/*
 * trace_hwlat.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior
 * of certain underlying system hardware or firmware, independent of Linux
 * itself. The code was inspired by the hwlat_detector driver.
 *
 * The tracer kicks off a kernel thread that spins in a loop looking for
 * hardware induced latencies. It reads the time twice in a tight inner
 * loop with interrupts disabled; any gap seen between the two reads (or
 * between successive iterations) that exceeds tracing_thresh is recorded
 * as a hwlat trace event, along with any time spent in NMIs.
 *
 * The portion ("width") of each sampling period ("window") that is spent
 * actively sampling is configurable via debugfs (see init_debugfs() below).
 */
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */

/* sampling thread */
static struct task_struct *hwlat_kthread;

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* NMI timestamp counters */
static u64 nmi_ts_start;
static u64 nmi_total_ts;
static int nmi_count;
static int nmi_cpu;

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64		seqnum;		/* unique sequence */
	u64		duration;	/* delta */
	u64		outer_duration;	/* delta (outer loop) */
	u64		nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec	timestamp;	/* wall time */
	int		nmi_count;	/* # NMIs during this sample */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
};

static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->seqnum = sample->seqnum;
	entry->duration = sample->duration;
	entry->outer_duration = sample->outer_duration;
	entry->timestamp = sample->timestamp;
	entry->nmi_total_ts = sample->nmi_total_ts;
	entry->nmi_count = sample->nmi_count;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a

void trace_hwlat_callback(bool enter)
{
	if (smp_processor_id() != nmi_cpu)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			nmi_ts_start = time_get();
		else
			nmi_total_ts = time_get() - nmi_ts_start;
	}

	if (enter)
		nmi_count++;
}
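
/*
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled.
 */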
static int get_sample(void)
{
	struct trace_array *tr = hwlat_trace;
	time_type start, t1, t2, last_t2;
	s64 diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;

	do_div(thresh, NSEC_PER_USEC); /* modifies in place */

	nmi_cpu = smp_processor_id();
	nmi_total_ts = 0;
	nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */

	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (diff < 0) {
				pr_err(BANNER "time running backwards\n");
				goto out;
			}
			if (diff > outer_sample)
				outer_sample = diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			pr_err(BANNER "time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1)); /* current diff */

		/* This shouldn't happen */
		if (diff < 0) {
			pr_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		struct hwlat_sample s;

		ret = 1;

		/* We read in microseconds */
		if (nmi_total_ts)
			do_div(nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		s.timestamp = CURRENT_TIME;
		s.nmi_total_ts = nmi_total_ts;
		s.nmi_count = nmi_count;
		trace_hwlat_sample(&s);

		/* Keep a running maximum ever recorded hardware latency */
		if (sample > tr->max_latency)
			tr->max_latency = sample;
	}

out:
	return ret;
}

static struct cpumask save_cpumask;
static bool disable_migrate;

static void move_to_next_cpu(bool initmask)
{
	static struct cpumask *current_mask;
	int next_cpu;

	if (disable_migrate)
		return;

	/* Just pick the first CPU on first iteration */
	if (initmask) {
		current_mask = &save_cpumask;
		get_online_cpus();
		cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
		put_online_cpus();
		next_cpu = cpumask_first(current_mask);
		goto set_affinity;
	}

	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, &current->cpus_allowed))
		goto disable;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	next_cpu = cpumask_next(smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

 set_affinity:
	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen */
		goto disable;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 disable:
	disable_migrate = true;
}
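
/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Executes one loop interaction per sample window, moving to the next
 * online CPU each window and sleeping for the remainder of the window
 * (sample_window - sample_width) between samples.
 */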
static int kthread_fn(void *data)
{
	u64 interval;
	bool initmask = true;

	while (!kthread_should_stop()) {

		move_to_next_cpu(initmask);
		initmask = false;

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}
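
/*
 * start_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */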
static int start_kthread(struct trace_array *tr)
{
	struct task_struct *kthread;

	if (WARN_ON(hwlat_kthread))
		return 0;

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}
	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}
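
/*
 * stop_kthread - Kill the hardware latency sampling/detector kthread
 *
 * This kicks the running hardware latency kthread and tells it to stop
 * sampling now. Use this on unload and at system shutdown.
 */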
static void stop_kthread(void)
{
	if (!hwlat_kthread)
		return;
	kthread_stop(hwlat_kthread);
	hwlat_kthread = NULL;
}
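
/*
 * hwlat_read - Wrapper read function for reading both window and width
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function provides a generic read implementation for the global state
 * "hwlat_data" structure debugfs filesystem entries.
 */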
static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[U64STR_SIZE];
	u64 *entry = filp->private_data;
	u64 val;
	int len;

	if (!entry)
		return -EFAULT;

	if (cnt > sizeof(buf))
		cnt = sizeof(buf);

	val = *entry;

	len = snprintf(buf, sizeof(buf), "%llu\n", val);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
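
/*
 * hwlat_width_write - Write function for "width" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "width" interface
 * to the hardware latency detector. It can be used to configure for how
 * many us of the total window we will actively sample for any
 * hardware-induced latency periods. Obviously, it is not possible to
 * sample constantly and still have the system respond, so it is enforced
 * that width is less than the total window size.
 */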
static ssize_t
hwlat_width_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (val < hwlat_data.sample_window)
		hwlat_data.sample_width = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}
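
/*
 * hwlat_window_write - Write function for "window" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "window" interface
 * to the hardware latency detector. The window is the total time in us
 * that will be considered one sample period. Conceptually, windows occur
 * back-to-back and contain a sample width period during which actual
 * sampling occurs. It is enforced that any value written must be greater
 * than the sample width size, or an error results.
 */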
484static ssize_t
485hwlat_window_write(struct file *filp, const char __user *ubuf,
486 size_t cnt, loff_t *ppos)
487{
488 u64 val;
489 int err;
490
491 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
492 if (err)
493 return err;
494
495 mutex_lock(&hwlat_data.lock);
496 if (hwlat_data.sample_width < val)
497 hwlat_data.sample_window = val;
498 else
499 err = -EINVAL;
500 mutex_unlock(&hwlat_data.lock);
501
502 if (err)
503 return err;
504
505 return cnt;
506}

static const struct file_operations width_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_width_write,
};

static const struct file_operations window_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_window_write,
};
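
/*
 * init_debugfs - A function to initialize the debugfs interface files
 *
 * This function creates entries in debugfs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory the window and width files to change
 * and view those values.
 */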
static int init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *top_dir;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return -ENOMEM;

	top_dir = debugfs_create_dir("hwlat_detector", d_tracer);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = trace_create_file("window", 0640,
						top_dir,
						&hwlat_data.sample_window,
						&window_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = trace_create_file("width", 0640,
					       top_dir,
					       &hwlat_data.sample_width,
					       &width_fops);
	if (!hwlat_sample_width)
		goto err;

	return 0;

 err:
	debugfs_remove_recursive(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	err = start_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	stop_kthread();
}

static bool hwlat_busy;

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	disable_migrate = false;
	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	stop_kthread();

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	init_debugfs();

	return 0;
}
late_initcall(init_hwlat_tracer);