1
2#ifndef _LINUX_KERNEL_TRACE_H
3#define _LINUX_KERNEL_TRACE_H
4
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
18
19#ifdef CONFIG_FTRACE_SYSCALLS
20#include <asm/unistd.h>
21#include <asm/syscall.h>
22#endif
23
/*
 * Type tags for entries stored in the trace ring buffer.  Every event
 * record begins with a struct trace_entry whose ->type field holds one
 * of these values so readers know how to decode the payload (see
 * trace_assign_type() below).
 */
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,		/* function entry (function tracer) */
	TRACE_CTX,		/* context switch */
	TRACE_WAKE,		/* task wakeup */
	TRACE_STACK,		/* kernel stack trace */
	TRACE_PRINT,		/* trace_printk() text */
	TRACE_BPRINT,		/* binary trace_printk() (fmt + raw args) */
	TRACE_MMIO_RW,		/* mmiotrace read/write */
	TRACE_MMIO_MAP,		/* mmiotrace mapping */
	TRACE_BRANCH,		/* branch profiler */
	TRACE_GRAPH_RET,	/* function graph: return */
	TRACE_GRAPH_ENT,	/* function graph: entry */
	TRACE_USER_STACK,	/* user-space stack trace */
	TRACE_BLK,		/* block layer tracing */
	TRACE_BPUTS,		/* trace_puts() with a constant string */

	__TRACE_LAST_TYPE,
};
44
45
/*
 * The macros below expand the event descriptions in trace_entries.h
 * into plain C structures.  Each field-description macro maps to one
 * struct member declaration; the "_desc" variants expand to nothing
 * here because they describe data that lives inside another member.
 */
#undef __field
#define __field(type, item) type item;

#undef __field_struct
#define __field_struct(type, item) __field(type, item)

/* Field located inside another member ("container") — no storage here. */
#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size) type item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

/* Trailing variable-length data: flexible array member. */
#undef __dynamic_array
#define __dynamic_array(type, item) type item[];

#undef F_STRUCT
#define F_STRUCT(args...) args

/* Expand one event description into its ring-buffer entry structure. */
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
	struct struct_name { \
		struct trace_entry ent; \
		tstruct \
	}

/* A _DUP entry reuses a structure already defined above — expand to nothing. */
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

/* The registration function is irrelevant for the structure definition. */
#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
			 filter, regfn) \
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
82
83#include "trace_entries.h"
84
85
86
87
88
/*
 * Syscalls are special and need special handling; this is why they are
 * not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry ent;
	int nr;				/* syscall number */
	unsigned long args[];		/* syscall arguments (count depends on nr) */
};

struct syscall_trace_exit {
	struct trace_entry ent;
	int nr;				/* syscall number */
	long ret;			/* syscall return value */
};

/* Ring-buffer entry header for a kprobe hit. */
struct kprobe_trace_entry_head {
	struct trace_entry ent;
	unsigned long ip;		/* probed instruction address */
};

/* Ring-buffer entry header for a kretprobe hit. */
struct kretprobe_trace_entry_head {
	struct trace_entry ent;
	unsigned long func;		/* probed function address */
	unsigned long ret_ip;		/* address being returned to */
};
111
112
113
114
115
116
117
118
119
120
/*
 * trace_flag_type holds the CPU/irq state at the time a trace entry
 * was recorded (stored in trace_entry->flags):
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support reading irq flags here
 *  NEED_RESCHED	- reschedule was requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  PREEMPT_RESCHED	- lazy reschedule was requested
 *  NMI			- inside an NMI handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF = 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
	TRACE_FLAG_NEED_RESCHED = 0x04,
	TRACE_FLAG_HARDIRQ = 0x08,
	TRACE_FLAG_SOFTIRQ = 0x10,
	TRACE_FLAG_PREEMPT_RESCHED = 0x20,
	TRACE_FLAG_NMI = 0x40,
};

/* Size of the temporary buffer used by trace_printk() style output. */
#define TRACE_BUF_SIZE 1024
132
133struct trace_array;
134
135
136
137
138
139
/*
 * Per-CPU portion of a trace array: bookkeeping for one CPU's slice of
 * the ring buffer plus latency-tracer state (which task, when, and how
 * the current maximum-latency window started/ended).
 */
struct trace_array_cpu {
	atomic_t disabled;		/* non-zero: tracing disabled on this CPU */
	void *buffer_page;		/* ring buffer spare page */

	unsigned long entries;
	unsigned long saved_latency;	/* latency recorded at last max snapshot */
	unsigned long critical_start;	/* start/end of the critical section */
	unsigned long critical_end;	/*   being timed by latency tracers */
	unsigned long critical_sequence;
	unsigned long nice;		/* scheduling attributes of the task */
	unsigned long policy;		/*   that produced the max latency */
	unsigned long rt_priority;
	unsigned long skipped_entries;
	cycle_t preempt_timestamp;
	pid_t pid;
	kuid_t uid;
	char comm[TASK_COMM_LEN];	/* command name of that task */

	bool ignore_pid;		/* set when this CPU's current pid is filtered out */
};
160
161struct tracer;
162struct trace_option_dentry;
163
/*
 * A trace buffer: one ring buffer plus its per-CPU descriptors, owned
 * by a trace_array (which may have two of these when snapshots are
 * configured — see struct trace_array below).
 */
struct trace_buffer {
	struct trace_array *tr;			/* back pointer to owner */
	struct ring_buffer *buffer;
	struct trace_array_cpu __percpu *data;
	cycle_t time_start;			/* timestamp when tracing (re)started */
	int cpu;				/* CPU of the last max-latency snapshot */
};

/* Upper bound on the number of trace option flags (bits in trace_flags). */
#define TRACE_FLAGS_MAX_SIZE 32

/* Pairs a tracer with its tracefs option entries for one instance. */
struct trace_options {
	struct tracer *tracer;
	struct trace_option_dentry *topts;
};

/* Set of PIDs used for filtering which tasks get traced. */
struct trace_pid_list {
	unsigned int nr_pids;	/* number of pids in the array */
	int order;		/* page allocation order backing @pids */
	pid_t *pids;
};
184
185
186
187
188
189
/*
 * The trace array - an array of per-CPU trace arrays.  This is the
 * highest level data structure that individual tracers deal with.
 * Each tracing instance (including the global/top one) is one of these,
 * linked on ftrace_trace_arrays.
 */
struct trace_array {
	struct list_head list;		/* node on ftrace_trace_arrays */
	char *name;			/* instance name (NULL for the top instance) */
	struct trace_buffer trace_buffer;	/* the live buffer */
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while it
	 * continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer.  When a snapshot is taken, the buffers of the
	 * two are swapped, making the max_buffer hold the saved trace.
	 */
	struct trace_buffer max_buffer;
	bool allocated_snapshot;	/* max_buffer pages are allocated */
	unsigned long max_latency;	/* largest latency seen so far */
#endif
	struct trace_pid_list __rcu *filtered_pids;	/* set_event_pid filter */
	/*
	 * max_lock is used to protect the swapping of buffers when
	 * taking a max snapshot.  The buffers themselves are protected
	 * by per_cpu spinlocks, but the action of the swap of the
	 * buffer needs its own lock.
	 *
	 * This is defined as a arch_spinlock_t in order to help with
	 * performance.  It could also be a raw_spinlock_t, but that
	 * would require the irqsoff/preemptoff tracers to disable
	 * tracing around every use of it.
	 */
	arch_spinlock_t max_lock;
	int buffer_disabled;		/* non-zero: writes to the buffer are off */
#ifdef CONFIG_FTRACE_SYSCALLS
	int sys_refcount_enter;		/* users of syscall-enter events */
	int sys_refcount_exit;		/* users of syscall-exit events */
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int stop_count;			/* nesting count for tracing_stop() */
	int clock_id;			/* which trace clock this instance uses */
	int nr_topts;			/* entries in @topts */
	struct tracer *current_trace;
	unsigned int trace_flags;	/* TRACE_ITER_* option bits */
	unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int flags;		/* TRACE_ARRAY_FL_* */
	raw_spinlock_t start_lock;	/* serializes start/stop of tracing */
	struct dentry *dir;		/* tracefs directories for this instance */
	struct dentry *options;
	struct dentry *percpu_dir;
	struct dentry *event_dir;
	struct trace_options *topts;
	struct list_head systems;	/* event subsystems of this instance */
	struct list_head events;	/* event files of this instance */
	cpumask_var_t tracing_cpumask;	/* only trace on set CPUs */
	int ref;			/* references held on this instance */
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops *ops;
	/* function tracing enabled for this instance */
	int function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL = (1 << 0)	/* the top-level trace array */
};
259
260extern struct list_head ftrace_trace_arrays;
261
262extern struct mutex trace_types_lock;
263
264extern int trace_array_get(struct trace_array *tr);
265extern void trace_array_put(struct trace_array *tr);
266
267
268
269
270
271static inline struct trace_array *top_trace_array(void)
272{
273 struct trace_array *tr;
274
275 if (list_empty(&ftrace_trace_arrays))
276 return NULL;
277
278 tr = list_entry(ftrace_trace_arrays.prev,
279 typeof(*tr), list);
280 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
281 return tr;
282}
283
/* True iff @var is declared as a pointer to @type. */
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

/*
 * If @var's type matches @etype, assign the raw entry to it, sanity
 * check the entry's type id, and break out of the enclosing do-while.
 */
#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id) \
	if (FTRACE_CMP_TYPE(var, etype)) { \
		var = (typeof(var))(entry); \
		WARN_ON(id && (entry)->type != id); \
		break; \
	}

/* Will cause link errors if trace_assign_type() is used with an unknown type. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is the same
 * as the type being assigned.  To add a new type, simply add a line
 * with the following format:
 *
 *	IF_ASSIGN(var, ent, type, id);
 *
 * where "type" is the trace structure that includes trace_entry as the
 * first member, and "id" is its TRACE_* tag (0 skips the id check).
 */
#define trace_assign_type(var, ent) \
	do { \
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
			  TRACE_MMIO_RW); \
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
			  TRACE_MMIO_MAP); \
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
			  TRACE_GRAPH_ENT); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
			  TRACE_GRAPH_RET); \
		__ftrace_bad_type(); \
	} while (0)
331
332
333
334
335
336
/*
 * An option specific to a tracer.  This is a boolean value; the bit is
 * the mask the option sets in the flags value in tracer_flags.
 */
struct tracer_opt {
	const char *name;	/* will appear in the trace_options file */
	u32 bit;		/* mask assigned in the val field of tracer_flags */
};

/*
 * The set of private options for a tracer.  A tracer has to set the
 * initial value of val itself.
 */
struct tracer_flags {
	u32 val;		/* current option bits */
	struct tracer_opt *opts;	/* NULL-name-terminated option array */
	struct tracer *trace;	/* tracer these flags belong to */
};

/* Convenience initializer for a tracer_opt entry. */
#define TRACER_OPT(s, b) .name = #s, .bit = b

/* One tracefs file representing one tracer option for one instance. */
struct trace_option_dentry {
	struct tracer_opt *opt;
	struct tracer_flags *flags;
	struct trace_array *tr;
	struct dentry *entry;	/* the tracefs file itself */
};
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flag_changed: a core option is about to change; may veto the change
 * @flags: your private flags
 */
struct tracer {
	const char *name;
	int (*init)(struct trace_array *tr);
	void (*reset)(struct trace_array *tr);
	void (*start)(struct trace_array *tr);
	void (*stop)(struct trace_array *tr);
	int (*update_thresh)(struct trace_array *tr);
	void (*open)(struct trace_iterator *iter);
	void (*pipe_open)(struct trace_iterator *iter);
	void (*close)(struct trace_iterator *iter);
	void (*pipe_close)(struct trace_iterator *iter);
	ssize_t (*read)(struct trace_iterator *iter,
			struct file *filp, char __user *ubuf,
			size_t cnt, loff_t *ppos);
	ssize_t (*splice_read)(struct trace_iterator *iter,
			       struct file *filp,
			       loff_t *ppos,
			       struct pipe_inode_info *pipe,
			       size_t len,
			       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int (*selftest)(struct tracer *trace,
			struct trace_array *tr);
#endif
	void (*print_header)(struct seq_file *m);
	enum print_line_t (*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int (*set_flag)(struct trace_array *tr,
			u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int (*flag_changed)(struct trace_array *tr,
			    u32 mask, int set);
	struct tracer *next;		/* next on the registered-tracer list */
	struct tracer_flags *flags;
	int enabled;			/* number of instances using this tracer */
	int ref;
	bool print_max;			/* "trace" shows the max_buffer snapshot */
	bool allow_instances;		/* usable in created instances, not just top */
#ifdef CONFIG_TRACER_MAX_TRACE
	bool use_max_tr;		/* this tracer needs the snapshot buffer */
#endif
};
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
/*
 * Bits in current->trace_recursion used to detect and prevent tracing
 * recursion.  Only current may touch trace_recursion.
 *
 * Each tracing layer (buffer commit, function callback, internal list
 * function) gets four bits — one per context: normal, NMI, IRQ and
 * softirq — so a legitimate nesting of contexts is allowed while real
 * recursion within one context is caught.  The order of these bits
 * matters: see trace_test_and_set_recursion().
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* Internal use recursion bits */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in at a
	 * start of graph tracing, and we want to trace the function in
	 * that interrupt, this bit saves/restores whether we are in an
	 * irq context (set by ftrace_graph_addr() when in_irq()).
	 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))

/* Four context bits per layer: normal, NMI, IRQ, softirq. */
#define TRACE_CONTEXT_BITS 4

#define TRACE_FTRACE_START TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

/* Mask covering every recursion-protection bit up to the list layer. */
#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
496
497static __always_inline int trace_get_context_bit(void)
498{
499 int bit;
500
501 if (in_interrupt()) {
502 if (in_nmi())
503 bit = 0;
504
505 else if (in_irq())
506 bit = 1;
507 else
508 bit = 2;
509 } else
510 bit = 3;
511
512 return bit;
513}
514
/*
 * Claim the recursion bit for the current context within the layer
 * starting at @start.  Returns the bit number on success, -1 if this
 * context is already tracing (real recursion), or 0 if a higher layer
 * (above @max) already holds a recursion check for us.
 */
static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	/* keep the bit visible before the traced code runs */
	barrier();

	return bit;
}
534
/*
 * Release a recursion bit obtained from trace_test_and_set_recursion().
 * @bit == 0 means the caller never claimed a bit (the "previous check
 * was made" case above), so there is nothing to clear.
 */
static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	/* make sure the traced code is done before dropping the bit */
	barrier();
	current->trace_recursion = val;
}
548
549static inline struct ring_buffer_iter *
550trace_buffer_iter(struct trace_iterator *iter, int cpu)
551{
552 if (iter->buffer_iter && iter->buffer_iter[cpu])
553 return iter->buffer_iter[cpu];
554 return NULL;
555}
556
557int tracer_init(struct tracer *t, struct trace_array *tr);
558int tracing_is_enabled(void);
559void tracing_reset(struct trace_buffer *buf, int cpu);
560void tracing_reset_online_cpus(struct trace_buffer *buf);
561void tracing_reset_current(int cpu);
562void tracing_reset_all_online_cpus(void);
563int tracing_open_generic(struct inode *inode, struct file *filp);
564bool tracing_is_disabled(void);
565struct dentry *trace_create_file(const char *name,
566 umode_t mode,
567 struct dentry *parent,
568 void *data,
569 const struct file_operations *fops);
570
571struct dentry *tracing_init_dentry(void);
572
573struct ring_buffer_event;
574
575struct ring_buffer_event *
576trace_buffer_lock_reserve(struct ring_buffer *buffer,
577 int type,
578 unsigned long len,
579 unsigned long flags,
580 int pc);
581
582struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
583 struct trace_array_cpu *data);
584
585struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
586 int *ent_cpu, u64 *ent_ts);
587
588void __buffer_unlock_commit(struct ring_buffer *buffer,
589 struct ring_buffer_event *event);
590
591int trace_empty(struct trace_iterator *iter);
592
593void *trace_find_next_entry_inc(struct trace_iterator *iter);
594
595void trace_init_global_iter(struct trace_iterator *iter);
596
597void tracing_iter_reset(struct trace_iterator *iter, int cpu);
598
599void trace_function(struct trace_array *tr,
600 unsigned long ip,
601 unsigned long parent_ip,
602 unsigned long flags, int pc);
603void trace_graph_function(struct trace_array *tr,
604 unsigned long ip,
605 unsigned long parent_ip,
606 unsigned long flags, int pc);
607void trace_latency_header(struct seq_file *m);
608void trace_default_header(struct seq_file *m);
609void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
610int trace_empty(struct trace_iterator *iter);
611
612void trace_graph_return(struct ftrace_graph_ret *trace);
613int trace_graph_entry(struct ftrace_graph_ent *trace);
614void set_graph_array(struct trace_array *tr);
615
616void tracing_start_cmdline_record(void);
617void tracing_stop_cmdline_record(void);
618int register_tracer(struct tracer *type);
619int is_tracing_stopped(void);
620
621loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
622
623extern cpumask_var_t __read_mostly tracing_buffer_mask;
624
/* Iterate @cpu over every CPU that has a tracing buffer. */
#define for_each_tracing_cpu(cpu) \
	for_each_cpu(cpu, tracing_buffer_mask)
627
628extern unsigned long nsecs_to_usecs(unsigned long nsecs);
629
630extern unsigned long tracing_thresh;
631
632#ifdef CONFIG_TRACER_MAX_TRACE
633void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
634void update_max_tr_single(struct trace_array *tr,
635 struct task_struct *tsk, int cpu);
636#endif
637
638#ifdef CONFIG_STACKTRACE
639void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
640 int pc);
641
642void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
643 int pc);
644#else
645static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
646 unsigned long flags, int pc)
647{
648}
649
650static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
651 int skip, int pc)
652{
653}
654#endif
655
656extern cycle_t ftrace_now(int cpu);
657
658extern void trace_find_cmdline(int pid, char comm[]);
659
660#ifdef CONFIG_DYNAMIC_FTRACE
661extern unsigned long ftrace_update_tot_cnt;
662#endif
/* Functions used as dynamic-ftrace targets by the startup selftests. */
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);
667
668extern bool ring_buffer_expanded;
669extern bool tracing_selftest_disabled;
670
671#ifdef CONFIG_FTRACE_STARTUP_TEST
672extern int trace_selftest_startup_function(struct tracer *trace,
673 struct trace_array *tr);
674extern int trace_selftest_startup_function_graph(struct tracer *trace,
675 struct trace_array *tr);
676extern int trace_selftest_startup_irqsoff(struct tracer *trace,
677 struct trace_array *tr);
678extern int trace_selftest_startup_preemptoff(struct tracer *trace,
679 struct trace_array *tr);
680extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
681 struct trace_array *tr);
682extern int trace_selftest_startup_wakeup(struct tracer *trace,
683 struct trace_array *tr);
684extern int trace_selftest_startup_nop(struct tracer *trace,
685 struct trace_array *tr);
686extern int trace_selftest_startup_sched_switch(struct tracer *trace,
687 struct trace_array *tr);
688extern int trace_selftest_startup_branch(struct tracer *trace,
689 struct trace_array *tr);
690
691
692
693
694
/*
 * Tracers reference selftest functions that only exist during boot and
 * may be __init; mark tracer data __refdata so such references are
 * allowed when selftests are enabled.
 */
#define __tracer_data __refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
699#endif
700
701extern void *head_page(struct trace_array_cpu *data);
702extern unsigned long long ns2usecs(cycle_t nsec);
703extern int
704trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
705extern int
706trace_vprintk(unsigned long ip, const char *fmt, va_list args);
707extern int
708trace_array_vprintk(struct trace_array *tr,
709 unsigned long ip, const char *fmt, va_list args);
710int trace_array_printk(struct trace_array *tr,
711 unsigned long ip, const char *fmt, ...);
712int trace_array_printk_buf(struct ring_buffer *buffer,
713 unsigned long ip, const char *fmt, ...);
714void trace_printk_seq(struct trace_seq *s);
715enum print_line_t print_trace_line(struct trace_iterator *iter);
716
717extern char trace_find_mark(unsigned long long duration);
718
719
720#ifdef CONFIG_FUNCTION_GRAPH_TRACER
721
722
/* Flag options for function-graph output. */
#define TRACE_GRAPH_PRINT_OVERRUN 0x1
#define TRACE_GRAPH_PRINT_CPU 0x2
#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
#define TRACE_GRAPH_PRINT_PROC 0x8
#define TRACE_GRAPH_PRINT_DURATION 0x10
#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
#define TRACE_GRAPH_PRINT_IRQS 0x40
#define TRACE_GRAPH_PRINT_TAIL 0x80
#define TRACE_GRAPH_SLEEP_TIME 0x100	/* account time spent sleeping */
#define TRACE_GRAPH_GRAPH_TIME 0x200	/* account time in nested functions */
/* Top bits select the fill style for the duration column. */
#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
735
736extern void ftrace_graph_sleep_time_control(bool enable);
737extern void ftrace_graph_graph_time_control(bool enable);
738
739extern enum print_line_t
740print_graph_function_flags(struct trace_iterator *iter, u32 flags);
741extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
742extern void
743trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
744extern void graph_trace_open(struct trace_iterator *iter);
745extern void graph_trace_close(struct trace_iterator *iter);
746extern int __trace_graph_entry(struct trace_array *tr,
747 struct ftrace_graph_ent *trace,
748 unsigned long flags, int pc);
749extern void __trace_graph_return(struct trace_array *tr,
750 struct ftrace_graph_ret *trace,
751 unsigned long flags, int pc);
752
753
754#ifdef CONFIG_DYNAMIC_FTRACE
755
756#define FTRACE_GRAPH_MAX_FUNCS 32
757extern int ftrace_graph_count;
758extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
759extern int ftrace_graph_notrace_count;
760extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
761
762static inline int ftrace_graph_addr(unsigned long addr)
763{
764 int i;
765
766 if (!ftrace_graph_count)
767 return 1;
768
769 for (i = 0; i < ftrace_graph_count; i++) {
770 if (addr == ftrace_graph_funcs[i]) {
771
772
773
774
775
776 if (in_irq())
777 trace_recursion_set(TRACE_IRQ_BIT);
778 else
779 trace_recursion_clear(TRACE_IRQ_BIT);
780 return 1;
781 }
782 }
783
784 return 0;
785}
786
787static inline int ftrace_graph_notrace_addr(unsigned long addr)
788{
789 int i;
790
791 if (!ftrace_graph_notrace_count)
792 return 0;
793
794 for (i = 0; i < ftrace_graph_notrace_count; i++) {
795 if (addr == ftrace_graph_notrace_funcs[i])
796 return 1;
797 }
798
799 return 0;
800}
801#else
/* Without dynamic ftrace there are no filter lists: trace every address. */
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

/* ... and no notrace list: never exclude an address. */
static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
811#endif
812#else
/* Graph tracer not built in: let the normal output path handle the entry. */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
818#endif
819
820extern struct list_head ftrace_pids;
821
822#ifdef CONFIG_FUNCTION_TRACER
823extern bool ftrace_filter_param __initdata;
824static inline int ftrace_trace_task(struct task_struct *task)
825{
826 if (list_empty(&ftrace_pids))
827 return 1;
828
829 return test_tsk_trace_trace(task);
830}
831extern int ftrace_is_dead(void);
832int ftrace_create_function_files(struct trace_array *tr,
833 struct dentry *parent);
834void ftrace_destroy_function_files(struct trace_array *tr);
835void ftrace_init_global_array_ops(struct trace_array *tr);
836void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
837void ftrace_reset_array_ops(struct trace_array *tr);
838int using_ftrace_ops_list_func(void);
839#else
/* Function tracer not built in: trace everything, stub out the hooks. */
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
/* ftrace_func_t is undefined here; this must be a macro, not a function. */
#define ftrace_init_array_ops(tr, func) do { } while (0)
857#endif
858
859#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
860void ftrace_create_filter_files(struct ftrace_ops *ops,
861 struct dentry *parent);
862void ftrace_destroy_filter_files(struct ftrace_ops *ops);
863#else
864
865
866
867
868#define ftrace_create_filter_files(ops, parent) do { } while (0)
869#define ftrace_destroy_filter_files(ops) do { } while (0)
870#endif
871
872bool ftrace_event_is_function(struct trace_event_call *call);
873
874
875
876
877
878
879
880
/*
 * struct trace_parser - serves for reading user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool cont;
	char *buffer;
	unsigned idx;
	unsigned size;
};
887
888static inline bool trace_parser_loaded(struct trace_parser *parser)
889{
890 return (parser->idx != 0);
891}
892
893static inline bool trace_parser_cont(struct trace_parser *parser)
894{
895 return parser->cont;
896}
897
898static inline void trace_parser_clear(struct trace_parser *parser)
899{
900 parser->cont = false;
901 parser->idx = 0;
902}
903
904extern int trace_parser_get_init(struct trace_parser *parser, int size);
905extern void trace_parser_put(struct trace_parser *parser);
906extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
907 size_t cnt, loff_t *ppos);
908
909
910
911
912#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Config-dependent pieces of the TRACE_FLAGS list below: each C()
 * entry is only appended when its subsystem is built in, so the bit
 * numbering stays dense for the actual configuration.
 */
# define FGRAPH_FLAGS \
		C(DISPLAY_GRAPH, "display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS \
		C(BRANCH, "branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS \
		C(FUNCTION, "function-trace"),
# define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS 0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS \
		C(STACKTRACE, "stacktrace"),
#else
# define STACK_FLAGS
#endif
941
942
943
944
945
946
947
948
/*
 * All the trace options, as (enum name, tracefs file name) pairs.
 * Redefining C() below turns this single list into both the bit-index
 * enum and the mask enum, guaranteeing they stay in sync (they must
 * also match the trace_options array in trace.c).
 */
#define TRACE_FLAGS \
		C(PRINT_PARENT, "print-parent"), \
		C(SYM_OFFSET, "sym-offset"), \
		C(SYM_ADDR, "sym-addr"), \
		C(VERBOSE, "verbose"), \
		C(RAW, "raw"), \
		C(HEX, "hex"), \
		C(BIN, "bin"), \
		C(BLOCK, "block"), \
		C(PRINTK, "trace_printk"), \
		C(ANNOTATE, "annotate"), \
		C(USERSTACKTRACE, "userstacktrace"), \
		C(SYM_USEROBJ, "sym-userobj"), \
		C(PRINTK_MSGONLY, "printk-msg-only"), \
		C(CONTEXT_INFO, "context-info"), \
		C(LATENCY_FMT, "latency-format"), \
		C(RECORD_CMD, "record-cmd"), \
		C(OVERWRITE, "overwrite"), \
		C(STOP_ON_FREE, "disable_on_free"), \
		C(IRQ_INFO, "irq-info"), \
		C(MARKERS, "markers"), \
		FUNCTION_FLAGS \
		FGRAPH_FLAGS \
		STACK_FLAGS \
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };

/*
 * Options that control how a symbol/parent address is printed;
 * grouped so callers can test "any symbol output requested" at once.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1003
1004extern struct tracer nop_trace;
1005
1006#ifdef CONFIG_BRANCH_TRACER
1007extern int enable_branch_tracing(struct trace_array *tr);
1008extern void disable_branch_tracing(void);
1009static inline int trace_branch_enable(struct trace_array *tr)
1010{
1011 if (tr->trace_flags & TRACE_ITER_BRANCH)
1012 return enable_branch_tracing(tr);
1013 return 0;
1014}
1015static inline void trace_branch_disable(void)
1016{
1017
1018 disable_branch_tracing();
1019}
1020#else
/* Branch tracer not built in: both operations are no-ops. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
1028#endif
1029
1030
1031int tracing_update_buffers(void);
1032
/* Description of one field of a trace event, used by the event filter. */
struct ftrace_event_field {
	struct list_head link;	/* node on the event's field list */
	const char *name;
	const char *type;
	int filter_type;	/* FILTER_* kind used for comparisons */
	int offset;		/* byte offset of the field in the entry */
	int size;		/* field size in bytes */
	int is_signed;
};

/* A compiled filter expression attached to an event or subsystem. */
struct event_filter {
	int n_preds;		/* number of preds in use */
	int a_preds;		/* number of preds allocated */
	struct filter_pred *preds;
	struct filter_pred *root;	/* root of the predicate tree */
	char *filter_string;	/* the original user-supplied expression */
};

/* A group of related trace events ("system") sharing one filter. */
struct event_subsystem {
	struct list_head list;
	const char *name;
	struct event_filter *filter;
	int ref_count;
};

/* Per-instance tracefs directory for an event subsystem. */
struct trace_subsystem_dir {
	struct list_head list;
	struct event_subsystem *subsystem;
	struct trace_array *tr;		/* owning instance */
	struct dentry *entry;		/* the tracefs directory */
	int ref_count;
	int nr_events;
};
1066
#define FILTER_PRED_INVALID ((unsigned short)-1)
/*
 * IS_RIGHT and FOLD intentionally share bit 15: they live in different
 * unsigned short fields of struct filter_pred (IS_RIGHT in the parent
 * field, FOLD in the index field), so there is no conflict.
 */
#define FILTER_PRED_IS_RIGHT (1 << 15)
#define FILTER_PRED_FOLD (1 << 15)

/*
 * Predicate indexes are unsigned shorts with the top bit used as a
 * flag (above), leaving 14 bits for the index itself: 2^14 preds is
 * way more than enough.
 */
#define MAX_FILTER_PRED 16384
1079
struct filter_pred;
struct regex;

/* Evaluate one predicate against a raw event record; non-zero = match. */
typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

/* What part of the string the glob pattern anchors to. */
enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char pattern[MAX_FILTER_STR_VAL];
	int len;		/* length of the pattern */
	int field_len;		/* length of the field being matched */
	regex_match_func match;	/* matcher selected by filter_parse_regex() */
};
1100
/*
 * One predicate in a compiled event filter.  Predicates form a binary
 * tree (index/parent/left/right point into the preds array); leaves
 * compare a field against val or regex via fn.
 */
struct filter_pred {
	filter_pred_fn_t fn;	/* comparison function for this pred */
	u64 val;		/* numeric comparison value */
	struct regex regex;	/* string comparison pattern */
	unsigned short *ops;
	struct ftrace_event_field *field;	/* event field being tested */
	int offset;		/* field offset within the record */
	int not;		/* invert the result (!= / !~) */
	int op;			/* comparison operator */
	unsigned short index;	/* this pred's slot (may carry FILTER_PRED_FOLD) */
	unsigned short parent;	/* parent slot (may carry FILTER_PRED_IS_RIGHT) */
	unsigned short left;	/* child slots, FILTER_PRED_INVALID for leaves */
	unsigned short right;
};
1115
1116static inline bool is_string_field(struct ftrace_event_field *field)
1117{
1118 return field->filter_type == FILTER_DYN_STRING ||
1119 field->filter_type == FILTER_STATIC_STRING ||
1120 field->filter_type == FILTER_PTR_STRING;
1121}
1122
1123static inline bool is_function_field(struct ftrace_event_field *field)
1124{
1125 return field->filter_type == FILTER_TRACE_FN;
1126}
1127
1128extern enum regex_type
1129filter_parse_regex(char *buff, int len, char **search, int *not);
1130extern void print_event_filter(struct trace_event_file *file,
1131 struct trace_seq *s);
1132extern int apply_event_filter(struct trace_event_file *file,
1133 char *filter_string);
1134extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1135 char *filter_string);
1136extern void print_subsystem_event_filter(struct event_subsystem *system,
1137 struct trace_seq *s);
1138extern int filter_assign_type(const char *type);
1139extern int create_event_filter(struct trace_event_call *call,
1140 char *filter_str, bool set_str,
1141 struct event_filter **filterp);
1142extern void free_event_filter(struct event_filter *filter);
1143
1144struct ftrace_event_field *
1145trace_find_event_field(struct trace_event_call *call, char *name);
1146
1147extern void trace_event_enable_cmd_record(bool enable);
1148extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1149extern int event_trace_del_tracer(struct trace_array *tr);
1150
1151extern struct trace_event_file *find_event_file(struct trace_array *tr,
1152 const char *system,
1153 const char *event);
1154
/*
 * Fetch the event data stashed in the inode's i_private by the tracefs
 * event file.  ACCESS_ONCE forces a single load — presumably because
 * i_private can be cleared concurrently on event removal; callers must
 * re-validate under the appropriate lock (TODO confirm locking rules).
 */
static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}
1159
1160extern struct mutex event_mutex;
1161extern struct list_head ftrace_events;
1162
1163extern const struct file_operations event_trigger_fops;
1164
1165extern int register_trigger_cmds(void);
1166extern void clear_event_triggers(struct trace_array *tr);
1167
/* State for one trigger instance attached to a trace event file. */
struct event_trigger_data {
	unsigned long count;	/* remaining invocations (":count" syntax) */
	int ref;		/* reference count */
	struct event_trigger_ops *ops;
	struct event_command *cmd_ops;	/* command that created this trigger */
	struct event_filter __rcu *filter;	/* optional "if" filter */
	char *filter_str;	/* original filter text */
	void *private_data;
	bool paused;
	struct list_head list;	/* node on the event file's trigger list */
};
1179
1180extern void trigger_data_free(struct event_trigger_data *data);
1181extern int event_trigger_init(struct event_trigger_ops *ops,
1182 struct event_trigger_data *data);
1183extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1184 int trigger_enable);
1185extern void update_cond_flag(struct trace_event_file *file);
1186extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
1187 struct event_trigger_data *test,
1188 struct trace_event_file *file);
1189extern int set_trigger_filter(char *filter_str,
1190 struct event_trigger_data *trigger_data,
1191 struct trace_event_file *file);
1192extern int register_event_command(struct event_command *cmd);
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
/**
 * struct event_trigger_ops - callbacks for trace event triggers
 * @func: invoked when the triggering event occurs; @rec is the raw
 *	event record (may be NULL when the command doesn't need it)
 * @init: called when the trigger is registered
 * @free: called when the trigger is unregistered
 * @print: show this trigger in the event's "trigger" file
 */
struct event_trigger_ops {
	void (*func)(struct event_trigger_data *data,
		     void *rec);
	int (*init)(struct event_trigger_ops *ops,
		    struct event_trigger_data *data);
	void (*free)(struct event_trigger_ops *ops,
		     struct event_trigger_data *data);
	int (*print)(struct seq_file *m,
		     struct event_trigger_ops *ops,
		     struct event_trigger_data *data);
};
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
/**
 * struct event_command - one trigger command (e.g. "snapshot", "enable_event")
 * @list: node on the global trigger-command list
 * @name: the command name written to the event's "trigger" file
 * @trigger_type: unique ETT_* flag identifying this trigger type
 * @flags: EVENT_CMD_FL_* (see enum event_command_flags below)
 * @func: parse the command string and (un)register the trigger
 * @reg: attach an instance of this trigger to an event file
 * @unreg: detach one instance from an event file
 * @unreg_all: detach every instance of this command from an event file
 * @set_filter: attach an "if" filter to a trigger instance (optional)
 * @get_trigger_ops: select the ops to use for a given cmd/param pair
 */
struct event_command {
	struct list_head list;
	char *name;
	enum event_trigger_type trigger_type;
	int flags;
	int (*func)(struct event_command *cmd_ops,
		    struct trace_event_file *file,
		    char *glob, char *cmd, char *params);
	int (*reg)(char *glob,
		   struct event_trigger_ops *ops,
		   struct event_trigger_data *data,
		   struct trace_event_file *file);
	void (*unreg)(char *glob,
		      struct event_trigger_ops *ops,
		      struct event_trigger_data *data,
		      struct trace_event_file *file);
	void (*unreg_all)(struct trace_event_file *file);
	int (*set_filter)(char *filter_str,
			  struct event_trigger_data *data,
			  struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER = 1,	/* invoke after the event is recorded */
	EVENT_CMD_FL_NEEDS_REC = 2,	/* trigger func needs the event record */
};
1373
1374static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1375{
1376 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1377}
1378
1379static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1380{
1381 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1382}
1383
1384extern int trace_event_enable_disable(struct trace_event_file *file,
1385 int enable, int soft_disable);
1386extern int tracing_alloc_snapshot(void);
1387
1388extern const char *__start___trace_bprintk_fmt[];
1389extern const char *__stop___trace_bprintk_fmt[];
1390
1391extern const char *__start___tracepoint_str[];
1392extern const char *__stop___tracepoint_str[];
1393
1394void trace_printk_control(bool enabled);
1395void trace_printk_init_buffers(void);
1396void trace_printk_start_comm(void);
1397int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1398int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
/*
 * Like trace_puts() but for use by the trace infrastructure itself:
 * it forwards directly to __trace_puts() without the special buffer
 * handling — a bit slower, fine for warnings where speed is not key.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1410
/*
 * Re-expand trace_entries.h a second time, now turning each event
 * description into an extern declaration of its trace_event_call.
 */
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
	extern struct trace_event_call \
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"
1420
1421#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1422int perf_ftrace_event_register(struct trace_event_call *call,
1423 enum trace_reg type, void *data);
1424#else
1425#define perf_ftrace_event_register NULL
1426#endif
1427
#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
/* Syscall tracing not built in: stub the hooks out. */
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_enum_update(struct trace_enum_map **map, int len);
#else
/* Event tracing not built in: stub the init/update hooks out. */
static inline void __init trace_event_init(void) { }
static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
#endif
1446
1447extern struct trace_iterator *tracepoint_print_iter;
1448
1449#endif
1450