1
2#ifndef _LINUX_KERNEL_TRACE_H
3#define _LINUX_KERNEL_TRACE_H
4
5#include <linux/fs.h>
6#include <linux/atomic.h>
7#include <linux/sched.h>
8#include <linux/clocksource.h>
9#include <linux/ring_buffer.h>
10#include <linux/mmiotrace.h>
11#include <linux/tracepoint.h>
12#include <linux/ftrace.h>
13#include <linux/hw_breakpoint.h>
14#include <linux/trace_seq.h>
15#include <linux/ftrace_event.h>
16#include <linux/compiler.h>
18
19#ifdef CONFIG_FTRACE_SYSCALLS
20#include <asm/unistd.h>
21#include <asm/syscall.h>
22#endif
23
24enum trace_type {
25 __TRACE_FIRST_TYPE = 0,
26
27 TRACE_FN,
28 TRACE_CTX,
29 TRACE_WAKE,
30 TRACE_STACK,
31 TRACE_PRINT,
32 TRACE_BPRINT,
33 TRACE_MMIO_RW,
34 TRACE_MMIO_MAP,
35 TRACE_BRANCH,
36 TRACE_GRAPH_RET,
37 TRACE_GRAPH_ENT,
38 TRACE_USER_STACK,
39 TRACE_BLK,
40 TRACE_BPUTS,
41
42 __TRACE_LAST_TYPE,
43};
44
45
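/*
 * The macros below (__field, __array, __dynamic_array, ...) are used,
 * together with FTRACE_ENTRY and the event definitions pulled in from
 * trace_entries.h further down, to expand each ftrace event description
 * into a plain C struct: a struct trace_entry header followed by the
 * event's fields. The *_desc variants describe fields of an embedded
 * structure and therefore expand to nothing here.
 */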
46#undef __field
47#define __field(type, item) type item;
48
49#undef __field_struct
50#define __field_struct(type, item) __field(type, item)
51
52#undef __field_desc
53#define __field_desc(type, container, item)
54
55#undef __array
56#define __array(type, item, size) type item[size];
57
58#undef __array_desc
59#define __array_desc(type, container, item, size)
60
61#undef __dynamic_array
62#define __dynamic_array(type, item) type item[];
63
64#undef F_STRUCT
65#define F_STRUCT(args...) args
66
67#undef FTRACE_ENTRY
68#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
69 struct struct_name { \
70 struct trace_entry ent; \
71 tstruct \
72 }
73
74#undef TP_ARGS
75#define TP_ARGS(args...) args
76
77#undef FTRACE_ENTRY_DUP
78#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
79
80#undef FTRACE_ENTRY_REG
81#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
82 filter, regfn) \
83 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
84 filter)
85
86#include "trace_entries.h"
87
88
89
90
91
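/*
 * Entry layouts for syscall and kprobe/kretprobe events. These are
 * defined by hand here instead of being generated from trace_entries.h.
 */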
92struct syscall_trace_enter {
93 struct trace_entry ent;
94 int nr;
95 unsigned long args[];
96};
97
98struct syscall_trace_exit {
99 struct trace_entry ent;
100 int nr;
101 long ret;
102};
103
104struct kprobe_trace_entry_head {
105 struct trace_entry ent;
106 unsigned long ip;
107};
108
109struct kretprobe_trace_entry_head {
110 struct trace_entry ent;
111 unsigned long func;
112 unsigned long ret_ip;
113};
114
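/*
 * trace_flag_type holds the state flags recorded with each trace entry:
 *  IRQS_OFF        - interrupts were disabled
 *  IRQS_NOSUPPORT  - the arch cannot report the irqs-off state
 *  NEED_RESCHED    - TIF_NEED_RESCHED was set
 *  HARDIRQ         - recorded in hard irq context
 *  SOFTIRQ         - recorded in softirq context
 *  PREEMPT_RESCHED - PREEMPT_NEED_RESCHED was set
 */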
124enum trace_flag_type {
125 TRACE_FLAG_IRQS_OFF = 0x01,
126 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
127 TRACE_FLAG_NEED_RESCHED = 0x04,
128 TRACE_FLAG_HARDIRQ = 0x08,
129 TRACE_FLAG_SOFTIRQ = 0x10,
130 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
131};
132
133#define TRACE_BUF_SIZE 1024
134
135struct trace_array;
136
137
138
139
140
141
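/*
 * Per-CPU tracing state: the per-CPU disable counter, entry counts, and
 * the saved latency, critical-section and task information used by the
 * latency tracers.
 */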
142struct trace_array_cpu {
143 atomic_t disabled;
144 void *buffer_page;
145
146 unsigned long entries;
147 unsigned long saved_latency;
148 unsigned long critical_start;
149 unsigned long critical_end;
150 unsigned long critical_sequence;
151 unsigned long nice;
152 unsigned long policy;
153 unsigned long rt_priority;
154 unsigned long skipped_entries;
155 cycle_t preempt_timestamp;
156 pid_t pid;
157 kuid_t uid;
158 char comm[TASK_COMM_LEN];
159};
160
161struct tracer;
162
163struct trace_buffer {
164 struct trace_array *tr;
165 struct ring_buffer *buffer;
166 struct trace_array_cpu __percpu *data;
167 cycle_t time_start;
168 int cpu;
169};
170
171
172
173
174
175
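/*
 * The trace array - the top level data structure that individual tracers
 * and trace instances interact with. It holds the trace buffer (and the
 * max/snapshot buffer when configured) plus the per-instance bookkeeping.
 */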
176struct trace_array {
177 struct list_head list;
178 char *name;
179 struct trace_buffer trace_buffer;
180#ifdef CONFIG_TRACER_MAX_TRACE
181
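 /*
  * max_buffer holds a snapshot of trace_buffer taken when a new maximum
  * latency is recorded or when a snapshot is requested; the two buffers
  * are swapped rather than copied.
  */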
192 struct trace_buffer max_buffer;
193 bool allocated_snapshot;
194 unsigned long max_latency;
195#endif
196
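 /*
  * max_lock serializes the buffer swap done when a max latency snapshot
  * is taken. It is an arch_spinlock_t to keep its overhead low when lock
  * debugging is enabled, and it lives outside of CONFIG_TRACER_MAX_TRACE
  * because it is also used elsewhere.
  */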
209 arch_spinlock_t max_lock;
210 int buffer_disabled;
211#ifdef CONFIG_FTRACE_SYSCALLS
212 int sys_refcount_enter;
213 int sys_refcount_exit;
214 struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
215 struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
216#endif
217 int stop_count;
218 int clock_id;
219 struct tracer *current_trace;
220 unsigned int flags;
221 raw_spinlock_t start_lock;
222 struct dentry *dir;
223 struct dentry *options;
224 struct dentry *percpu_dir;
225 struct dentry *event_dir;
226 struct list_head systems;
227 struct list_head events;
228 cpumask_var_t tracing_cpumask;
229 int ref;
230#ifdef CONFIG_FUNCTION_TRACER
231 struct ftrace_ops *ops;
232
233 int function_enabled;
234#endif
235};
236
237enum {
238 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
239};
240
241extern struct list_head ftrace_trace_arrays;
242
243extern struct mutex trace_types_lock;
244
245extern int trace_array_get(struct trace_array *tr);
246extern void trace_array_put(struct trace_array *tr);
247
248
249
250
251
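/*
 * The global (top) trace array is expected to be the first array added
 * to ftrace_trace_arrays, so it is looked up from the tail of the list;
 * the WARN_ON() checks that it really carries TRACE_ARRAY_FL_GLOBAL.
 */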
252static inline struct trace_array *top_trace_array(void)
253{
254 struct trace_array *tr;
255
256 if (list_empty(&ftrace_trace_arrays))
257 return NULL;
258
259 tr = list_entry(ftrace_trace_arrays.prev,
260 typeof(*tr), list);
261 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
262 return tr;
263}
264
265#define FTRACE_CMP_TYPE(var, type) \
266 __builtin_types_compatible_p(typeof(var), type *)
267
268#undef IF_ASSIGN
269#define IF_ASSIGN(var, entry, etype, id) \
270 if (FTRACE_CMP_TYPE(var, etype)) { \
271 var = (typeof(var))(entry); \
272 WARN_ON(id && (entry)->type != id); \
273 break; \
274 }
275
276
277extern void __ftrace_bad_type(void);
278
279
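/*
 * trace_assign_type() casts a generic trace entry to the specific entry
 * type matching the type of @var, and WARNs if the entry's recorded type
 * id does not match. To handle a new entry type, add a line of the form:
 *
 *	IF_ASSIGN(var, ent, struct type_entry, TRACE_TYPE_ID);
 *
 * using 0 for the id when one struct is shared by several type ids (as
 * ctx_switch_entry is for TRACE_CTX and TRACE_WAKE).
 */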
292#define trace_assign_type(var, ent) \
293 do { \
294 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
295 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
296 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
297 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
298 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
299 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
300 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
301 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
302 TRACE_MMIO_RW); \
303 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
304 TRACE_MMIO_MAP); \
305 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
306 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
307 TRACE_GRAPH_ENT); \
308 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
309 TRACE_GRAPH_RET); \
310 __ftrace_bad_type(); \
311 } while (0)
312
313
314
315
316
317
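/*
 * An option specific to a tracer; the option is a simple boolean whose
 * value is kept in the corresponding bit of tracer_flags.val below.
 */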
318struct tracer_opt {
319 const char *name;
320 u32 bit;
321};
322
323
324
325
326
327struct tracer_flags {
328 u32 val;
329 struct tracer_opt *opts;
330};
331
332
333#define TRACER_OPT(s, b) .name = #s, .bit = b
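
/*
 * Illustrative use of TRACER_OPT (a sketch only -- "foo" and the bit
 * value are made-up names, not defined in this header):
 *
 *	static struct tracer_opt foo_opts[] = {
 *		{ TRACER_OPT(foo-verbose, 0x1) },
 *		{ }
 *	};
 *	static struct tracer_flags foo_flags = {
 *		.val  = 0,
 *		.opts = foo_opts,
 *	};
 *
 * The empty entry terminates the array. A tracer points its ->flags at
 * such a structure and reacts to option changes in its ->set_flag()
 * callback.
 */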
334
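/*
 * struct tracer describes one tracer and the callbacks the tracing core
 * uses to drive it: init()/reset() when it is selected or released,
 * start()/stop() for enabling and disabling, optional read() and
 * splice_read() for trace_pipe, print_line()/print_header() for output
 * formatting, and set_flag()/flag_changed() for the tracer-specific
 * options above. Tracers are made available with register_tracer().
 */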
356struct tracer {
357 const char *name;
358 int (*init)(struct trace_array *tr);
359 void (*reset)(struct trace_array *tr);
360 void (*start)(struct trace_array *tr);
361 void (*stop)(struct trace_array *tr);
362 int (*update_thresh)(struct trace_array *tr);
363 void (*open)(struct trace_iterator *iter);
364 void (*pipe_open)(struct trace_iterator *iter);
365 void (*close)(struct trace_iterator *iter);
366 void (*pipe_close)(struct trace_iterator *iter);
367 ssize_t (*read)(struct trace_iterator *iter,
368 struct file *filp, char __user *ubuf,
369 size_t cnt, loff_t *ppos);
370 ssize_t (*splice_read)(struct trace_iterator *iter,
371 struct file *filp,
372 loff_t *ppos,
373 struct pipe_inode_info *pipe,
374 size_t len,
375 unsigned int flags);
376#ifdef CONFIG_FTRACE_STARTUP_TEST
377 int (*selftest)(struct tracer *trace,
378 struct trace_array *tr);
379#endif
380 void (*print_header)(struct seq_file *m);
381 enum print_line_t (*print_line)(struct trace_iterator *iter);
382
383 int (*set_flag)(struct trace_array *tr,
384 u32 old_flags, u32 bit, int set);
385
386 int (*flag_changed)(struct trace_array *tr,
387 u32 mask, int set);
388 struct tracer *next;
389 struct tracer_flags *flags;
390 int enabled;
391 int ref;
392 bool print_max;
393 bool allow_instances;
394#ifdef CONFIG_TRACER_MAX_TRACE
395 bool use_max_tr;
396#endif
397};
398
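/*
 * Bits in current->trace_recursion used to detect recursive tracing.
 * Each tracing path (the ring buffer code, the function tracer callbacks
 * and the internal ftrace_ops list function) gets TRACE_CONTEXT_BITS
 * bits, one per context (NMI, hard irq, soft irq, normal), selected by
 * trace_get_context_bit() below.
 */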
427enum {
428 TRACE_BUFFER_BIT,
429 TRACE_BUFFER_NMI_BIT,
430 TRACE_BUFFER_IRQ_BIT,
431 TRACE_BUFFER_SIRQ_BIT,
432
433
434 TRACE_FTRACE_BIT,
435 TRACE_FTRACE_NMI_BIT,
436 TRACE_FTRACE_IRQ_BIT,
437 TRACE_FTRACE_SIRQ_BIT,
438
439
440 TRACE_INTERNAL_BIT,
441 TRACE_INTERNAL_NMI_BIT,
442 TRACE_INTERNAL_IRQ_BIT,
443 TRACE_INTERNAL_SIRQ_BIT,
444
445 TRACE_CONTROL_BIT,
446
447
448
449
450
451
452
453
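 /*
  * TRACE_IRQ_BIT reuses trace_recursion to remember that a filtered
  * graph function was hit while in irq context, so the function graph
  * tracer can keep tracing it even when irq tracing is disabled (see
  * ftrace_graph_addr() below).
  */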
454 TRACE_IRQ_BIT,
455};
456
457#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
458#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
459#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
460
461#define TRACE_CONTEXT_BITS 4
462
463#define TRACE_FTRACE_START TRACE_FTRACE_BIT
464#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
465
466#define TRACE_LIST_START TRACE_INTERNAL_BIT
467#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
468
469#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
470
471static __always_inline int trace_get_context_bit(void)
472{
473 int bit;
474
475 if (in_interrupt()) {
476 if (in_nmi())
477 bit = 0;
479 else if (in_irq())
480 bit = 1;
481 else
482 bit = 2;
483 } else
484 bit = 3;
485
486 return bit;
487}
488
489static __always_inline int trace_test_and_set_recursion(int start, int max)
490{
491 unsigned int val = current->trace_recursion;
492 int bit;
493
494
495 if ((val & TRACE_CONTEXT_MASK) > max)
496 return 0;
497
498 bit = trace_get_context_bit() + start;
499 if (unlikely(val & (1 << bit)))
500 return -1;
501
502 val |= 1 << bit;
503 current->trace_recursion = val;
504 barrier();
505
506 return bit;
507}
508
509static __always_inline void trace_clear_recursion(int bit)
510{
511 unsigned int val = current->trace_recursion;
512
513 if (!bit)
514 return;
515
516 bit = 1 << bit;
517 val &= ~bit;
518
519 barrier();
520 current->trace_recursion = val;
521}
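
/*
 * Illustrative caller pattern for the two helpers above (a sketch, not
 * copied from an actual caller):
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;		(already tracing in this context, bail out)
 *
 *	... do the tracing work ...
 *
 *	trace_clear_recursion(bit);
 */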
522
523static inline struct ring_buffer_iter *
524trace_buffer_iter(struct trace_iterator *iter, int cpu)
525{
526 if (iter->buffer_iter && iter->buffer_iter[cpu])
527 return iter->buffer_iter[cpu];
528 return NULL;
529}
530
531int tracer_init(struct tracer *t, struct trace_array *tr);
532int tracing_is_enabled(void);
533void tracing_reset(struct trace_buffer *buf, int cpu);
534void tracing_reset_online_cpus(struct trace_buffer *buf);
535void tracing_reset_current(int cpu);
536void tracing_reset_all_online_cpus(void);
537int tracing_open_generic(struct inode *inode, struct file *filp);
538bool tracing_is_disabled(void);
539struct dentry *trace_create_file(const char *name,
540 umode_t mode,
541 struct dentry *parent,
542 void *data,
543 const struct file_operations *fops);
544
545struct dentry *tracing_init_dentry(void);
546
547struct ring_buffer_event;
548
549struct ring_buffer_event *
550trace_buffer_lock_reserve(struct ring_buffer *buffer,
551 int type,
552 unsigned long len,
553 unsigned long flags,
554 int pc);
555
556struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
557 struct trace_array_cpu *data);
558
559struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
560 int *ent_cpu, u64 *ent_ts);
561
562void __buffer_unlock_commit(struct ring_buffer *buffer,
563 struct ring_buffer_event *event);
564
565int trace_empty(struct trace_iterator *iter);
566
567void *trace_find_next_entry_inc(struct trace_iterator *iter);
568
569void trace_init_global_iter(struct trace_iterator *iter);
570
571void tracing_iter_reset(struct trace_iterator *iter, int cpu);
572
573void trace_function(struct trace_array *tr,
574 unsigned long ip,
575 unsigned long parent_ip,
576 unsigned long flags, int pc);
577void trace_graph_function(struct trace_array *tr,
578 unsigned long ip,
579 unsigned long parent_ip,
580 unsigned long flags, int pc);
581void trace_latency_header(struct seq_file *m);
582void trace_default_header(struct seq_file *m);
583void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
584int trace_empty(struct trace_iterator *iter);
585
586void trace_graph_return(struct ftrace_graph_ret *trace);
587int trace_graph_entry(struct ftrace_graph_ent *trace);
588void set_graph_array(struct trace_array *tr);
589
590void tracing_start_cmdline_record(void);
591void tracing_stop_cmdline_record(void);
592int register_tracer(struct tracer *type);
593int is_tracing_stopped(void);
594
595loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
596
597extern cpumask_var_t __read_mostly tracing_buffer_mask;
598
599#define for_each_tracing_cpu(cpu) \
600 for_each_cpu(cpu, tracing_buffer_mask)
601
602extern unsigned long nsecs_to_usecs(unsigned long nsecs);
603
604extern unsigned long tracing_thresh;
605
606#ifdef CONFIG_TRACER_MAX_TRACE
607void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
608void update_max_tr_single(struct trace_array *tr,
609 struct task_struct *tsk, int cpu);
610#endif
611
612#ifdef CONFIG_STACKTRACE
613void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
614 int skip, int pc);
615
616void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
617 int skip, int pc, struct pt_regs *regs);
618
619void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
620 int pc);
621
622void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
623 int pc);
624#else
625static inline void ftrace_trace_stack(struct ring_buffer *buffer,
626 unsigned long flags, int skip, int pc)
627{
628}
629
630static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
631 unsigned long flags, int skip,
632 int pc, struct pt_regs *regs)
633{
634}
635
636static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
637 unsigned long flags, int pc)
638{
639}
640
641static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
642 int skip, int pc)
643{
644}
645#endif
646
647extern cycle_t ftrace_now(int cpu);
648
649extern void trace_find_cmdline(int pid, char comm[]);
650
651#ifdef CONFIG_DYNAMIC_FTRACE
652extern unsigned long ftrace_update_tot_cnt;
653#endif
654#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
655extern int DYN_FTRACE_TEST_NAME(void);
656#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
657extern int DYN_FTRACE_TEST_NAME2(void);
658
659extern bool ring_buffer_expanded;
660extern bool tracing_selftest_disabled;
661DECLARE_PER_CPU(int, ftrace_cpu_disabled);
662
663#ifdef CONFIG_FTRACE_STARTUP_TEST
664extern int trace_selftest_startup_function(struct tracer *trace,
665 struct trace_array *tr);
666extern int trace_selftest_startup_function_graph(struct tracer *trace,
667 struct trace_array *tr);
668extern int trace_selftest_startup_irqsoff(struct tracer *trace,
669 struct trace_array *tr);
670extern int trace_selftest_startup_preemptoff(struct tracer *trace,
671 struct trace_array *tr);
672extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
673 struct trace_array *tr);
674extern int trace_selftest_startup_wakeup(struct tracer *trace,
675 struct trace_array *tr);
676extern int trace_selftest_startup_nop(struct tracer *trace,
677 struct trace_array *tr);
678extern int trace_selftest_startup_sched_switch(struct tracer *trace,
679 struct trace_array *tr);
680extern int trace_selftest_startup_branch(struct tracer *trace,
681 struct trace_array *tr);
682
683
684
685
686
687#define __tracer_data __refdata
688#else
689
690#define __tracer_data __read_mostly
691#endif
692
693extern void *head_page(struct trace_array_cpu *data);
694extern unsigned long long ns2usecs(cycle_t nsec);
695extern int
696trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
697extern int
698trace_vprintk(unsigned long ip, const char *fmt, va_list args);
699extern int
700trace_array_vprintk(struct trace_array *tr,
701 unsigned long ip, const char *fmt, va_list args);
702int trace_array_printk(struct trace_array *tr,
703 unsigned long ip, const char *fmt, ...);
704int trace_array_printk_buf(struct ring_buffer *buffer,
705 unsigned long ip, const char *fmt, ...);
706void trace_printk_seq(struct trace_seq *s);
707enum print_line_t print_trace_line(struct trace_iterator *iter);
708
709extern unsigned long trace_flags;
710
711extern char trace_find_mark(unsigned long long duration);
712
713
714#ifdef CONFIG_FUNCTION_GRAPH_TRACER
715
716
717#define TRACE_GRAPH_PRINT_OVERRUN 0x1
718#define TRACE_GRAPH_PRINT_CPU 0x2
719#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
720#define TRACE_GRAPH_PRINT_PROC 0x8
721#define TRACE_GRAPH_PRINT_DURATION 0x10
722#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
723#define TRACE_GRAPH_PRINT_IRQS 0x40
724#define TRACE_GRAPH_PRINT_TAIL 0x80
725#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
726#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
727
728extern enum print_line_t
729print_graph_function_flags(struct trace_iterator *iter, u32 flags);
730extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
731extern void
732trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
733extern void graph_trace_open(struct trace_iterator *iter);
734extern void graph_trace_close(struct trace_iterator *iter);
735extern int __trace_graph_entry(struct trace_array *tr,
736 struct ftrace_graph_ent *trace,
737 unsigned long flags, int pc);
738extern void __trace_graph_return(struct trace_array *tr,
739 struct ftrace_graph_ret *trace,
740 unsigned long flags, int pc);
741
742
743#ifdef CONFIG_DYNAMIC_FTRACE
744
745#define FTRACE_GRAPH_MAX_FUNCS 32
746extern int ftrace_graph_count;
747extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
748extern int ftrace_graph_notrace_count;
749extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
750
751static inline int ftrace_graph_addr(unsigned long addr)
752{
753 int i;
754
755 if (!ftrace_graph_count)
756 return 1;
757
758 for (i = 0; i < ftrace_graph_count; i++) {
759 if (addr == ftrace_graph_funcs[i]) {
760
761
762
763
764
765 if (in_irq())
766 trace_recursion_set(TRACE_IRQ_BIT);
767 else
768 trace_recursion_clear(TRACE_IRQ_BIT);
769 return 1;
770 }
771 }
772
773 return 0;
774}
775
776static inline int ftrace_graph_notrace_addr(unsigned long addr)
777{
778 int i;
779
780 if (!ftrace_graph_notrace_count)
781 return 0;
782
783 for (i = 0; i < ftrace_graph_notrace_count; i++) {
784 if (addr == ftrace_graph_notrace_funcs[i])
785 return 1;
786 }
787
788 return 0;
789}
790#else
791static inline int ftrace_graph_addr(unsigned long addr)
792{
793 return 1;
794}
795
796static inline int ftrace_graph_notrace_addr(unsigned long addr)
797{
798 return 0;
799}
800#endif
801#else
802static inline enum print_line_t
803print_graph_function_flags(struct trace_iterator *iter, u32 flags)
804{
805 return TRACE_TYPE_UNHANDLED;
806}
807#endif
808
809extern struct list_head ftrace_pids;
810
811#ifdef CONFIG_FUNCTION_TRACER
812extern bool ftrace_filter_param __initdata;
813static inline int ftrace_trace_task(struct task_struct *task)
814{
815 if (list_empty(&ftrace_pids))
816 return 1;
817
818 return test_tsk_trace_trace(task);
819}
820extern int ftrace_is_dead(void);
821int ftrace_create_function_files(struct trace_array *tr,
822 struct dentry *parent);
823void ftrace_destroy_function_files(struct trace_array *tr);
824void ftrace_init_global_array_ops(struct trace_array *tr);
825void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
826void ftrace_reset_array_ops(struct trace_array *tr);
827int using_ftrace_ops_list_func(void);
828#else
829static inline int ftrace_trace_task(struct task_struct *task)
830{
831 return 1;
832}
833static inline int ftrace_is_dead(void) { return 0; }
834static inline int
835ftrace_create_function_files(struct trace_array *tr,
836 struct dentry *parent)
837{
838 return 0;
839}
840static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
841static inline __init void
842ftrace_init_global_array_ops(struct trace_array *tr) { }
843static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
844
845#define ftrace_init_array_ops(tr, func) do { } while (0)
846#endif
847
848#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
849void ftrace_create_filter_files(struct ftrace_ops *ops,
850 struct dentry *parent);
851void ftrace_destroy_filter_files(struct ftrace_ops *ops);
852#else
853
854
855
856
857#define ftrace_create_filter_files(ops, parent) do { } while (0)
858#define ftrace_destroy_filter_files(ops) do { } while (0)
859#endif
860
861int ftrace_event_is_function(struct ftrace_event_call *call);
862
863
864
865
866
867
868
869
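/*
 * trace_parser - helper for reading space-separated tokens of user input.
 * @cont:   set when the last read did not finish the token (no trailing space)
 * @buffer: holds the parsed input
 * @idx:    current length of the parsed input
 * @size:   size of @buffer
 */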
870struct trace_parser {
871 bool cont;
872 char *buffer;
873 unsigned idx;
874 unsigned size;
875};
876
877static inline bool trace_parser_loaded(struct trace_parser *parser)
878{
879 return (parser->idx != 0);
880}
881
882static inline bool trace_parser_cont(struct trace_parser *parser)
883{
884 return parser->cont;
885}
886
887static inline void trace_parser_clear(struct trace_parser *parser)
888{
889 parser->cont = false;
890 parser->idx = 0;
891}
892
893extern int trace_parser_get_init(struct trace_parser *parser, int size);
894extern void trace_parser_put(struct trace_parser *parser);
895extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
896 size_t cnt, loff_t *ppos);
897
898
899
900
901
902
903
904
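/*
 * trace_iterator_flags are the bits of the global trace_flags value that
 * control how trace output is generated. They are expected to stay in
 * sync with the trace_options string array in kernel/trace/trace.c.
 */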
905enum trace_iterator_flags {
906 TRACE_ITER_PRINT_PARENT = 0x01,
907 TRACE_ITER_SYM_OFFSET = 0x02,
908 TRACE_ITER_SYM_ADDR = 0x04,
909 TRACE_ITER_VERBOSE = 0x08,
910 TRACE_ITER_RAW = 0x10,
911 TRACE_ITER_HEX = 0x20,
912 TRACE_ITER_BIN = 0x40,
913 TRACE_ITER_BLOCK = 0x80,
914 TRACE_ITER_STACKTRACE = 0x100,
915 TRACE_ITER_PRINTK = 0x200,
916 TRACE_ITER_PREEMPTONLY = 0x400,
917 TRACE_ITER_BRANCH = 0x800,
918 TRACE_ITER_ANNOTATE = 0x1000,
919 TRACE_ITER_USERSTACKTRACE = 0x2000,
920 TRACE_ITER_SYM_USEROBJ = 0x4000,
921 TRACE_ITER_PRINTK_MSGONLY = 0x8000,
922 TRACE_ITER_CONTEXT_INFO = 0x10000,
923 TRACE_ITER_LATENCY_FMT = 0x20000,
924 TRACE_ITER_SLEEP_TIME = 0x40000,
925 TRACE_ITER_GRAPH_TIME = 0x80000,
926 TRACE_ITER_RECORD_CMD = 0x100000,
927 TRACE_ITER_OVERWRITE = 0x200000,
928 TRACE_ITER_STOP_ON_FREE = 0x400000,
929 TRACE_ITER_IRQ_INFO = 0x800000,
930 TRACE_ITER_MARKERS = 0x1000000,
931 TRACE_ITER_FUNCTION = 0x2000000,
932};
933
934
935
936
937
938#define TRACE_ITER_SYM_MASK \
939 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
940
941extern struct tracer nop_trace;
942
943#ifdef CONFIG_BRANCH_TRACER
944extern int enable_branch_tracing(struct trace_array *tr);
945extern void disable_branch_tracing(void);
946static inline int trace_branch_enable(struct trace_array *tr)
947{
948 if (trace_flags & TRACE_ITER_BRANCH)
949 return enable_branch_tracing(tr);
950 return 0;
951}
952static inline void trace_branch_disable(void)
953{
954
955 disable_branch_tracing();
956}
957#else
958static inline int trace_branch_enable(struct trace_array *tr)
959{
960 return 0;
961}
962static inline void trace_branch_disable(void)
963{
964}
965#endif
966
967
968int tracing_update_buffers(void);
969
970struct ftrace_event_field {
971 struct list_head link;
972 const char *name;
973 const char *type;
974 int filter_type;
975 int offset;
976 int size;
977 int is_signed;
978};
979
980struct event_filter {
981 int n_preds;
982 int a_preds;
983 struct filter_pred *preds;
984 struct filter_pred *root;
985 char *filter_string;
986};
987
988struct event_subsystem {
989 struct list_head list;
990 const char *name;
991 struct event_filter *filter;
992 int ref_count;
993};
994
995struct ftrace_subsystem_dir {
996 struct list_head list;
997 struct event_subsystem *subsystem;
998 struct trace_array *tr;
999 struct dentry *entry;
1000 int ref_count;
1001 int nr_events;
1002};
1003
1004#define FILTER_PRED_INVALID ((unsigned short)-1)
1005#define FILTER_PRED_IS_RIGHT (1 << 15)
1006#define FILTER_PRED_FOLD (1 << 15)
1007
1008
1009
1010
1011
1012
1013
1014
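/*
 * Predicate slots are addressed with an unsigned short whose top bits are
 * reserved for the IS_RIGHT/FOLD flags above, so predicate indexes are
 * limited to 2^14 (MAX_FILTER_PRED), which is far more than enough.
 */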
1015#define MAX_FILTER_PRED 16384
1016
1017struct filter_pred;
1018struct regex;
1019
1020typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1021
1022typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1023
1024enum regex_type {
1025 MATCH_FULL = 0,
1026 MATCH_FRONT_ONLY,
1027 MATCH_MIDDLE_ONLY,
1028 MATCH_END_ONLY,
1029};
1030
1031struct regex {
1032 char pattern[MAX_FILTER_STR_VAL];
1033 int len;
1034 int field_len;
1035 regex_match_func match;
1036};
1037
1038struct filter_pred {
1039 filter_pred_fn_t fn;
1040 u64 val;
1041 struct regex regex;
1042 unsigned short *ops;
1043 struct ftrace_event_field *field;
1044 int offset;
1045 int not;
1046 int op;
1047 unsigned short index;
1048 unsigned short parent;
1049 unsigned short left;
1050 unsigned short right;
1051};
1052
1053extern enum regex_type
1054filter_parse_regex(char *buff, int len, char **search, int *not);
1055extern void print_event_filter(struct ftrace_event_file *file,
1056 struct trace_seq *s);
1057extern int apply_event_filter(struct ftrace_event_file *file,
1058 char *filter_string);
1059extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
1060 char *filter_string);
1061extern void print_subsystem_event_filter(struct event_subsystem *system,
1062 struct trace_seq *s);
1063extern int filter_assign_type(const char *type);
1064extern int create_event_filter(struct ftrace_event_call *call,
1065 char *filter_str, bool set_str,
1066 struct event_filter **filterp);
1067extern void free_event_filter(struct event_filter *filter);
1068
1069struct ftrace_event_field *
1070trace_find_event_field(struct ftrace_event_call *call, char *name);
1071
1072extern void trace_event_enable_cmd_record(bool enable);
1073extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1074extern int event_trace_del_tracer(struct trace_array *tr);
1075
1076extern struct ftrace_event_file *find_event_file(struct trace_array *tr,
1077 const char *system,
1078 const char *event);
1079
1080static inline void *event_file_data(struct file *filp)
1081{
1082 return ACCESS_ONCE(file_inode(filp)->i_private);
1083}
1084
1085extern struct mutex event_mutex;
1086extern struct list_head ftrace_events;
1087
1088extern const struct file_operations event_trigger_fops;
1089
1090extern int register_trigger_cmds(void);
1091extern void clear_event_triggers(struct trace_array *tr);
1092
1093struct event_trigger_data {
1094 unsigned long count;
1095 int ref;
1096 struct event_trigger_ops *ops;
1097 struct event_command *cmd_ops;
1098 struct event_filter __rcu *filter;
1099 char *filter_str;
1100 void *private_data;
1101 struct list_head list;
1102};
1103
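/*
 * event_trigger_ops - callbacks for one instance of an event trigger:
 * func() runs when the trigger fires, init() and free() manage the
 * lifetime of the associated event_trigger_data, and print() displays
 * the trigger in the event's "trigger" file.
 */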
1140struct event_trigger_ops {
1141 void (*func)(struct event_trigger_data *data);
1142 int (*init)(struct event_trigger_ops *ops,
1143 struct event_trigger_data *data);
1144 void (*free)(struct event_trigger_ops *ops,
1145 struct event_trigger_data *data);
1146 int (*print)(struct seq_file *m,
1147 struct event_trigger_ops *ops,
1148 struct event_trigger_data *data);
1149};
1150
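/*
 * event_command - describes one trigger command (traceon, traceoff,
 * snapshot, stacktrace, enable_event, disable_event, ...): func() parses
 * the command written to an event's "trigger" file, reg()/unreg() attach
 * and detach the resulting event_trigger_data, set_filter() handles an
 * optional "if" clause, and get_trigger_ops() picks the event_trigger_ops
 * variant to use for the given parameters.
 */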
1239struct event_command {
1240 struct list_head list;
1241 char *name;
1242 enum event_trigger_type trigger_type;
1243 bool post_trigger;
1244 int (*func)(struct event_command *cmd_ops,
1245 struct ftrace_event_file *file,
1246 char *glob, char *cmd, char *params);
1247 int (*reg)(char *glob,
1248 struct event_trigger_ops *ops,
1249 struct event_trigger_data *data,
1250 struct ftrace_event_file *file);
1251 void (*unreg)(char *glob,
1252 struct event_trigger_ops *ops,
1253 struct event_trigger_data *data,
1254 struct ftrace_event_file *file);
1255 int (*set_filter)(char *filter_str,
1256 struct event_trigger_data *data,
1257 struct ftrace_event_file *file);
1258 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1259};
1260
1261extern int trace_event_enable_disable(struct ftrace_event_file *file,
1262 int enable, int soft_disable);
1263extern int tracing_alloc_snapshot(void);
1264
1265extern const char *__start___trace_bprintk_fmt[];
1266extern const char *__stop___trace_bprintk_fmt[];
1267
1268extern const char *__start___tracepoint_str[];
1269extern const char *__stop___tracepoint_str[];
1270
1271void trace_printk_init_buffers(void);
1272void trace_printk_start_comm(void);
1273int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1274int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
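/*
 * internal_trace_puts() is for use by the tracing code itself: it writes
 * a plain string into the ring buffer via __trace_puts() without the
 * format bookkeeping that the normal trace_printk()/trace_puts() macros
 * rely on.
 */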
1285#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1286
1287#undef FTRACE_ENTRY
1288#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
1289 extern struct ftrace_event_call \
1290 __aligned(4) event_##call;
1291#undef FTRACE_ENTRY_DUP
1292#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1293 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1294 filter)
1295#include "trace_entries.h"
1296
1297#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1298int perf_ftrace_event_register(struct ftrace_event_call *call,
1299 enum trace_reg type, void *data);
1300#else
1301#define perf_ftrace_event_register NULL
1302#endif
1303
1304#ifdef CONFIG_FTRACE_SYSCALLS
1305void init_ftrace_syscalls(void);
1306#else
1307static inline void init_ftrace_syscalls(void) { }
1308#endif
1309
1310#ifdef CONFIG_EVENT_TRACING
1311void trace_event_init(void);
1312void trace_event_enum_update(struct trace_enum_map **map, int len);
1313#else
1314static inline void __init trace_event_init(void) { }
1315static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
1316#endif
1317
1318extern struct trace_iterator *tracepoint_print_iter;
1319
1320#endif
1321