// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * of code started a measurement of a new latency. These
 * measurements are not related to each other, so only the
 * measurement that still matches max_sequence when it finishes is
 * allowed to record a new maximum.
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

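/*
 * Function graph entry/return callbacks. Both go through
 * func_prolog_dec() above, so graph events are only recorded while
 * this CPU is inside a traced irqs-off/preempt-off section and
 * data->disabled guards against recursion.
 */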
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);

}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

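/*
 * Called when a critical section ends: compute how long irqs and/or
 * preemption were disabled and, if this beats the current maximum (or
 * exceeds tracing_thresh), take max_trace_lock and snapshot the trace
 * via update_max_tr_single(). max_sequence is bumped so measurements
 * that raced with this one are discarded.
 */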
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

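/*
 * Mark the beginning of a critical section on this CPU: remember the
 * current timestamp and the start address, and flag the CPU as tracing
 * via the tracing_cpu per-cpu variable so nested disables are ignored.
 */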
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, pc);

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

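/*
 * Mark the end of the critical section: clear tracing_cpu, record the
 * final event and hand off to check_critical_timing() to decide whether
 * this section is a new maximum latency worth saving.
 */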
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, pc);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings used to measure a maximum irqs-off latency */
void start_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

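/*
 * Handle run-time changes to the trace options: toggling the function
 * tracer registers/unregisters the ftrace callback, toggling
 * display-graph restarts the tracer in the requested mode, and anything
 * else falls through to trace_keep_overwrite().
 */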
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

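/*
 * Common initialization for the irqsoff, preemptoff and preemptirqsoff
 * tracers. Only one of them may be active at a time (irqsoff_busy);
 * latency format and ring-buffer overwrite are forced on while the
 * tracer runs and restored from save_flags on reset.
 */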
static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
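/*
 * Preemption on/off hooks. A section only counts for the preemptoff
 * tracer when it is not already covered by an irqs-off trace, which is
 * what the !irq_trace() check below provides.
 */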
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif

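/*
 * Register the configured tracers at boot. A sketch of typical usage
 * from user space, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo irqsoff > current_tracer
 *	# echo 1 > tracing_on
 *	... run the workload ...
 *	# echo 0 > tracing_on
 *	# cat tracing_max_latency
 *	# cat trace
 */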
__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif