// SPDX-License-Identifier: GPL-2.0
/*
 * trace_hwlat.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was developed originally to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The detector works by hogging one or more CPUs for configurable amounts of
 * time, with interrupts disabled, polling a timestamp source in a tight loop.
 * Any gap between two consecutive reads that the loop itself cannot account
 * for is attributed to the hardware or firmware (an SMI, for example) and,
 * if it exceeds tracing_thresh, is recorded as an hwlat trace event.
 */
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */
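
/*
 * Typical usage, assuming tracefs is mounted at /sys/kernel/tracing
 * (adjust the paths for other mount points):
 *
 *	echo 100 > /sys/kernel/tracing/tracing_thresh	# threshold in usecs
 *	echo hwlat > /sys/kernel/tracing/current_tracer
 *	cat /sys/kernel/tracing/trace
 *
 * Any gap wider than tracing_thresh usecs observed during the sampling
 * window shows up as an hwlat event in the trace output.
 */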

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */
static struct dentry *hwlat_thread_mode;	/* hwlat thread mode */

/*
 * The thread mode controls where the hwlatd sampling thread(s) run:
 * "none" leaves placement to the scheduler, "round-robin" migrates a
 * single thread across the allowed CPUs, and "per-cpu" runs one thread
 * on each allowed CPU.
 */
enum {
	MODE_NONE = 0,
	MODE_ROUND_ROBIN,
	MODE_PER_CPU,
	MODE_MAX
};
static char *thread_mode_str[] = { "none", "round-robin", "per-cpu" };
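
/*
 * The mode is selected via the "mode" tracefs file created by
 * init_tracefs() below; for example (path again assumes the usual
 * tracefs mount point):
 *
 *	cat /sys/kernel/tracing/hwlat_detector/mode
 *	none [round-robin] per-cpu
 *	echo per-cpu > /sys/kernel/tracing/hwlat_detector/mode
 */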

/* save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* runtime kthread data */
struct hwlat_kthread_data {
	struct task_struct	*kthread;
	/* NMI timestamp counters */
	u64			nmi_ts_start;
	u64			nmi_total_ts;
	int			nmi_count;
	int			nmi_cpu;
};

static struct hwlat_kthread_data hwlat_single_cpu_data;
static DEFINE_PER_CPU(struct hwlat_kthread_data, hwlat_per_cpu_data);

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64			seqnum;		/* unique sequence */
	u64			duration;	/* delta */
	u64			outer_duration;	/* delta (outer loop) */
	u64			nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec64	timestamp;	/* wall time */
	int			nmi_count;	/* # NMIs during this sample */
	int			count;		/* # of iterations over thresh */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

	int	thread_mode;		/* thread mode */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
	.thread_mode		= MODE_ROUND_ROBIN
};
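
/*
 * Worked example with the defaults above: each 1000000 usec (1s) window
 * begins with a 500000 usec (0.5s) busy-polling burst in get_sample();
 * kthread_fn() then sleeps for the remainder,
 *
 *	(1000000 - 500000) usec / USEC_PER_MSEC = 500 msec
 *
 * so a sampling thread occupies at most about half of one CPU.
 */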

/* Pick the kthread data matching the current thread mode and CPU */
static struct hwlat_kthread_data *get_cpu_data(void)
{
	if (hwlat_data.thread_mode == MODE_PER_CPU)
		return this_cpu_ptr(&hwlat_per_cpu_data);
	else
		return &hwlat_single_cpu_data;
}

static bool hwlat_busy;

/* Write one hwlat sample into the trace ring buffer */
static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  tracing_gen_ctx());
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->seqnum			= sample->seqnum;
	entry->duration			= sample->duration;
	entry->outer_duration		= sample->outer_duration;
	entry->timestamp		= sample->timestamp;
	entry->nmi_total_ts		= sample->nmi_total_ts;
	entry->nmi_count		= sample->nmi_count;
	entry->count			= sample->count;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a

void trace_hwlat_callback(bool enter)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();

	if (!kdata->kthread)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			kdata->nmi_ts_start = time_get();
		else
			kdata->nmi_total_ts += time_get() - kdata->nmi_ts_start;
	}

	if (enter)
		kdata->nmi_count++;
}
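
/*
 * Note: trace_hwlat_callback() is invoked from the NMI entry/exit hooks
 * (ftrace_nmi_enter()/ftrace_nmi_exit()) whenever
 * trace_hwlat_callback_enabled is set, so time spent in NMIs during a
 * sampling window is accounted in nmi_total_ts/nmi_count instead of
 * being mistaken for a hardware-induced gap.
 */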

/*
 * hwlat_err - report a hwlat error.
 */
#define hwlat_err(msg) ({							\
	struct trace_array *tr = hwlat_trace;					\
										\
	trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg);	\
})

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled and with
 * hwlat_data.lock held.
 *
 * Returns 1 if a latency above tracing_thresh was found, 0 if not, and a
 * negative value on error.
 */
static int get_sample(void)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();
	struct trace_array *tr = hwlat_trace;
	struct hwlat_sample s;
	time_type start, t1, t2, last_t2;
	s64 diff, outer_diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;
	unsigned int count = 0;

	do_div(thresh, NSEC_PER_USEC); /* modifies interval value */

	kdata->nmi_total_ts = 0;
	kdata->nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */
	outer_diff = 0;

	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			outer_diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (outer_diff < 0) {
				hwlat_err(BANNER "time running backwards\n");
				goto out;
			}
			if (outer_diff > outer_sample)
				outer_sample = outer_diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			hwlat_err("Time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1));     /* current diff */

		if (diff > thresh || outer_diff > thresh) {
			if (!count)
				ktime_get_real_ts64(&s.timestamp);
			count++;
		}

		/* This shouldn't happen */
		if (diff < 0) {
			hwlat_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		u64 latency;

		ret = 1;

		/* We read in microseconds */
		if (kdata->nmi_total_ts)
			do_div(kdata->nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		s.nmi_total_ts = kdata->nmi_total_ts;
		s.nmi_count = kdata->nmi_count;
		s.count = count;
		trace_hwlat_sample(&s);

		latency = max(sample, outer_sample);

		/* Keep a running maximum ever recorded hardware latency */
		if (latency > tr->max_latency) {
			tr->max_latency = latency;
			latency_fsnotify(tr);
		}
	}

out:
	return ret;
}

static struct cpumask save_cpumask;

static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	struct trace_array *tr = hwlat_trace;
	int next_cpu;

	/*
	 * If the user modifies the thread's CPU affinity, the round-robin
	 * logic should be disabled.
	 */
	if (!cpumask_equal(current_mask, current->cpus_ptr))
		goto change_mode;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
	next_cpu = cpumask_next(raw_smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto change_mode;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 change_mode:
	hwlat_data.thread_mode = MODE_NONE;
	pr_info(BANNER "cpumask changed while in round-robin mode, switching to mode none\n");
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Executes one loop interaction on each CPU in a round-robin fashion.
 */
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		if (hwlat_data.thread_mode == MODE_ROUND_ROBIN)
			move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}

/*
 * stop_single_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_single_kthread(void)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();
	struct task_struct *kthread;

	get_online_cpus();
	kthread = kdata->kthread;

	if (!kthread)
		goto out_put_cpus;

	kthread_stop(kthread);
	kdata->kthread = NULL;

out_put_cpus:
	put_online_cpus();
}

/*
 * start_single_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_single_kthread(struct trace_array *tr)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	get_online_cpus();
	if (kdata->kthread)
		goto out_put_cpus;

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		put_online_cpus();
		return -ENOMEM;
	}

	/* Just pick the first CPU on first iteration */
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);

	if (hwlat_data.thread_mode == MODE_ROUND_ROBIN) {
		next_cpu = cpumask_first(current_mask);
		cpumask_clear(current_mask);
		cpumask_set_cpu(next_cpu, current_mask);
	}

	sched_setaffinity(kthread->pid, current_mask);

	kdata->kthread = kthread;
	wake_up_process(kthread);

out_put_cpus:
	put_online_cpus();
	return 0;
}

/*
 * stop_cpu_kthread - Stop a hwlat cpu kthread
 */
static void stop_cpu_kthread(unsigned int cpu)
{
	struct task_struct *kthread;

	kthread = per_cpu(hwlat_per_cpu_data, cpu).kthread;
	if (kthread)
		kthread_stop(kthread);
	per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
}

/*
 * stop_per_cpu_kthreads - Inform the hardware latency sampling/detector kthreads to stop
 *
 * This kicks the running hardware latency sampling/detector kernel threads and
 * tells them to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_per_cpu_kthreads(void)
{
	unsigned int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		stop_cpu_kthread(cpu);
	put_online_cpus();
}

/*
 * start_cpu_kthread - Start a hwlat cpu kthread
 */
static int start_cpu_kthread(unsigned int cpu)
{
	struct task_struct *kthread;
	char comm[24];

	snprintf(comm, sizeof(comm), "hwlatd/%d", cpu);

	kthread = kthread_create_on_cpu(kthread_fn, NULL, cpu, comm);
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	per_cpu(hwlat_per_cpu_data, cpu).kthread = kthread;
	wake_up_process(kthread);

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Start a sampling thread on a CPU that just came online, if hwlat is
 * busy in per-cpu mode and the CPU is in the tracing cpumask. Runs from
 * a workqueue so the required locks can be taken.
 */
static void hwlat_hotplug_workfn(struct work_struct *dummy)
{
	struct trace_array *tr = hwlat_trace;
	unsigned int cpu = smp_processor_id();

	mutex_lock(&trace_types_lock);
	mutex_lock(&hwlat_data.lock);
	get_online_cpus();

	if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU)
		goto out_unlock;

	if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
		goto out_unlock;

	start_cpu_kthread(cpu);

out_unlock:
	put_online_cpus();
	mutex_unlock(&hwlat_data.lock);
	mutex_unlock(&trace_types_lock);
}

static DECLARE_WORK(hwlat_hotplug_work, hwlat_hotplug_workfn);

/*
 * hwlat_cpu_init - CPU hotplug online callback function
 */
static int hwlat_cpu_init(unsigned int cpu)
{
	schedule_work_on(cpu, &hwlat_hotplug_work);
	return 0;
}

/*
 * hwlat_cpu_die - CPU hotplug offline callback function
 */
static int hwlat_cpu_die(unsigned int cpu)
{
	stop_cpu_kthread(cpu);
	return 0;
}

static void hwlat_init_hotplug_support(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/hwlat:online",
				hwlat_cpu_init, hwlat_cpu_die);
	if (ret < 0)
		pr_warn(BANNER "Failed to initialize cpu hotplug support\n");
}
#else /* CONFIG_HOTPLUG_CPU */
static void hwlat_init_hotplug_support(void)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * start_per_cpu_kthreads - Kick off the hardware latency sampling/detector kthreads
 *
 * This starts the kernel threads that will sit on potentially all cpus and
 * sample the CPU timestamp counter (TSC or similar) and look for potential
 * hardware latencies.
 */
static int start_per_cpu_kthreads(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	unsigned int cpu;
	int retval;

	get_online_cpus();
	/*
	 * Run only on CPUs in which hwlat is allowed to run.
	 */
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);

	for_each_online_cpu(cpu)
		per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;

	for_each_cpu(cpu, current_mask) {
		retval = start_cpu_kthread(cpu);
		if (retval)
			goto out_error;
	}
	put_online_cpus();

	return 0;

out_error:
	put_online_cpus();
	stop_per_cpu_kthreads();
	return retval;
}

/* seq_file operations backing the "mode" file; the lock is held while showing */
static void *s_mode_start(struct seq_file *s, loff_t *pos)
{
	int mode = *pos;

	mutex_lock(&hwlat_data.lock);

	if (mode >= MODE_MAX)
		return NULL;

	return pos;
}

static void *s_mode_next(struct seq_file *s, void *v, loff_t *pos)
{
	int mode = ++(*pos);

	if (mode >= MODE_MAX)
		return NULL;

	return pos;
}

static int s_mode_show(struct seq_file *s, void *v)
{
	loff_t *pos = v;
	int mode = *pos;

	/* Bracket the currently selected mode */
	if (mode == hwlat_data.thread_mode)
		seq_printf(s, "[%s]", thread_mode_str[mode]);
	else
		seq_printf(s, "%s", thread_mode_str[mode]);

	if (mode != MODE_MAX)
		seq_puts(s, " ");

	return 0;
}

static void s_mode_stop(struct seq_file *s, void *v)
{
	seq_puts(s, "\n");
	mutex_unlock(&hwlat_data.lock);
}

static const struct seq_operations thread_mode_seq_ops = {
	.start		= s_mode_start,
	.next		= s_mode_next,
	.show		= s_mode_show,
	.stop		= s_mode_stop
};

static int hwlat_mode_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &thread_mode_seq_ops);
}

static void hwlat_tracer_start(struct trace_array *tr);
static void hwlat_tracer_stop(struct trace_array *tr);

/**
 * hwlat_mode_write - Write function for "mode" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "mode" interface
 * to the hardware latency detector. hwlatd has different operation modes.
 * The "none" mode sets the allowed cpumask for a single hwlatd thread at
 * startup and lets the scheduler handle the migration. The default mode is
 * "round-robin", in which a single hwlatd thread runs, migrating among the
 * allowed CPUs in a round-robin fashion. The "per-cpu" mode creates one
 * hwlatd thread per allowed CPU.
 */
static ssize_t hwlat_mode_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = hwlat_trace;
	const char *mode;
	char buf[64];
	int ret, i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	mode = strstrip(buf);

	ret = -EINVAL;

	/*
	 * trace_types_lock is taken to avoid concurrency on start/stop
	 * and hwlat_busy.
	 */
	mutex_lock(&trace_types_lock);
	if (hwlat_busy)
		hwlat_tracer_stop(tr);

	mutex_lock(&hwlat_data.lock);

	for (i = 0; i < MODE_MAX; i++) {
		if (strcmp(mode, thread_mode_str[i]) == 0) {
			hwlat_data.thread_mode = i;
			ret = cnt;
		}
	}

	mutex_unlock(&hwlat_data.lock);

	if (hwlat_busy)
		hwlat_tracer_start(tr);
	mutex_unlock(&trace_types_lock);

	*ppos += cnt;

	return ret;
}

/*
 * The width parameter is read/write using the generic trace_min_max_param
 * method. The *val is protected by the hwlat_data lock and is upper
 * bounded by the sample_window.
 */
static struct trace_min_max_param hwlat_width = {
	.lock		= &hwlat_data.lock,
	.val		= &hwlat_data.sample_width,
	.max		= &hwlat_data.sample_window,
	.min		= NULL,
};

/*
 * The window parameter is read/write using the generic trace_min_max_param
 * method. The *val is protected by the hwlat_data lock and is lower
 * bounded by the sample_width.
 */
static struct trace_min_max_param hwlat_window = {
	.lock		= &hwlat_data.lock,
	.val		= &hwlat_data.sample_window,
	.max		= NULL,
	.min		= &hwlat_data.sample_width,
};
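
/*
 * Example, reducing the duty cycle to 0.1s of polling per 2s window
 * (paths again assume the usual tracefs mount point). Because of the
 * min/max links above, a write that would leave width > window is
 * rejected, so grow the window before growing the width:
 *
 *	echo 2000000 > /sys/kernel/tracing/hwlat_detector/window
 *	echo 100000  > /sys/kernel/tracing/hwlat_detector/width
 */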

static const struct file_operations thread_mode_fops = {
	.open		= hwlat_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= hwlat_mode_write
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing system, and within
 * that directory the "width" and "window" files to change and observe the
 * sample width and window, and the "mode" file to change the thread mode.
 */
static int init_tracefs(void)
{
	int ret;
	struct dentry *top_dir;

	ret = tracing_init_dentry();
	if (ret)
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", NULL);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", 0640,
						  top_dir,
						  &hwlat_window,
						  &trace_min_max_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", 0640,
						 top_dir,
						 &hwlat_width,
						 &trace_min_max_fops);
	if (!hwlat_sample_width)
		goto err;

	hwlat_thread_mode = trace_create_file("mode", 0644,
					      top_dir,
					      NULL,
					      &thread_mode_fops);
	if (!hwlat_thread_mode)
		goto err;

	return 0;

 err:
	tracefs_remove(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	if (hwlat_data.thread_mode == MODE_PER_CPU)
		err = start_per_cpu_kthreads(tr);
	else
		err = start_single_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	if (hwlat_data.thread_mode == MODE_PER_CPU)
		stop_per_cpu_kthreads();
	else
		stop_single_kthread();
}

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	hwlat_tracer_stop(tr);

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};
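
/*
 * Because .allow_instances is set, hwlat can also be selected from a
 * trace instance, e.g. (assuming the usual tracefs mount point):
 *
 *	mkdir /sys/kernel/tracing/instances/hw
 *	echo hwlat > /sys/kernel/tracing/instances/hw/current_tracer
 *
 * but hwlat_tracer_init() returns -EBUSY if the detector is already
 * enabled elsewhere: only one instance may run it at a time.
 */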

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	hwlat_init_hotplug_support();

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);