/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

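/* serializes max-latency checks and updates between CPUs */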
static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * completed a measurement before we did.
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);

	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

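/*
 * Close off the current critical section: compute its duration and,
 * if it beats the threshold (or the previous maximum), record it as
 * the new max trace.
 */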
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

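/*
 * Mark the start of an irqs-off/preempt-off critical section on this
 * CPU: take a timestamp and remember where the section began.
 */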
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

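/*
 * Mark the end of the critical section on this CPU and hand off to
 * check_critical_timing() to decide whether it is a new maximum.
 */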
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings used for stoppage (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

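/*
 * With lockdep (CONFIG_PROVE_LOCKING) enabled, lockdep supplies the
 * trace_hardirqs_* entry points and calls back into the tracer via
 * time_hardirqs_on/off(); without it, the tracer provides the
 * trace_hardirqs_* functions itself below.
 */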
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs, normally provided by lockdep:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

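/*
 * Register/unregister the function-trace hook and mirror the global
 * "tracing on/off" state into tracer_enabled.
 */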
static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

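/*
 * Common init for the irqsoff, preemptoff and preemptirqsoff tracers:
 * force the latency output format, reset the max latency and start
 * tracing.
 */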
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);
	start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

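/*
 * The combined tracer: measures the longest stretch during which
 * irqs and/or preemption are disabled.
 */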
#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);