1
2
3#ifndef _LINUX_KERNEL_TRACE_H
4#define _LINUX_KERNEL_TRACE_H
5
6#include <linux/fs.h>
7#include <linux/atomic.h>
8#include <linux/sched.h>
9#include <linux/clocksource.h>
10#include <linux/ring_buffer.h>
11#include <linux/mmiotrace.h>
12#include <linux/tracepoint.h>
13#include <linux/ftrace.h>
14#include <linux/hw_breakpoint.h>
15#include <linux/trace_seq.h>
16#include <linux/trace_events.h>
17#include <linux/compiler.h>
18#include <linux/trace_seq.h>
19#include <linux/glob.h>
20
21#ifdef CONFIG_FTRACE_SYSCALLS
22#include <asm/unistd.h>
23#include <asm/syscall.h>
24#endif
25
/*
 * Identifiers for the entry types that can be stored in the ring
 * buffer.  The output path uses these to pick the right decoder for
 * each recorded event (see trace_assign_type() below).
 */
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_RAW_DATA,

	__TRACE_LAST_TYPE,
};
48
49
50#undef __field
51#define __field(type, item) type item;
52
53#undef __field_struct
54#define __field_struct(type, item) __field(type, item)
55
56#undef __field_desc
57#define __field_desc(type, container, item)
58
59#undef __array
60#define __array(type, item, size) type item[size];
61
62#undef __array_desc
63#define __array_desc(type, container, item, size)
64
65#undef __dynamic_array
66#define __dynamic_array(type, item) type item[];
67
68#undef F_STRUCT
69#define F_STRUCT(args...) args
70
71#undef FTRACE_ENTRY
72#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \
73 struct struct_name { \
74 struct trace_entry ent; \
75 tstruct \
76 }
77
78#undef FTRACE_ENTRY_DUP
79#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
80
81#undef FTRACE_ENTRY_REG
82#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
83 filter, regfn) \
84 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
85 filter)
86
87#undef FTRACE_ENTRY_PACKED
88#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \
89 filter) \
90 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
91 filter) __packed
92
93#include "trace_entries.h"
94
95
96
97
98
99struct syscall_trace_enter {
100 struct trace_entry ent;
101 int nr;
102 unsigned long args[];
103};
104
105struct syscall_trace_exit {
106 struct trace_entry ent;
107 int nr;
108 long ret;
109};
110
111struct kprobe_trace_entry_head {
112 struct trace_entry ent;
113 unsigned long ip;
114};
115
116struct kretprobe_trace_entry_head {
117 struct trace_entry ent;
118 unsigned long func;
119 unsigned long ret_ip;
120};
121
122
123
124
125
126
127
128
129
130
131enum trace_flag_type {
132 TRACE_FLAG_IRQS_OFF = 0x01,
133 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
134 TRACE_FLAG_NEED_RESCHED = 0x04,
135 TRACE_FLAG_HARDIRQ = 0x08,
136 TRACE_FLAG_SOFTIRQ = 0x10,
137 TRACE_FLAG_PREEMPT_RESCHED = 0x20,
138 TRACE_FLAG_NMI = 0x40,
139};
140
141#define TRACE_BUF_SIZE 1024
142
143struct trace_array;
144
145
146
147
148
149
/*
 * Per-CPU state of a trace buffer.  The latency tracers record here
 * which task and critical section produced the current maximum, and
 * the entry accounting used when buffers are resized or reset.
 */
struct trace_array_cpu {
	atomic_t		disabled;	/* non-zero: skip recording on this CPU */
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

	/* Cached pid-filter verdict for the task running on this CPU. */
	bool			ignore_pid;
#ifdef CONFIG_FUNCTION_TRACER
	/* Same, but for the function tracer's own pid filter. */
	bool			ftrace_ignore_pid;
#endif
};
173
174struct tracer;
175struct trace_option_dentry;
176
/*
 * A ring buffer plus its per-CPU bookkeeping.  Each trace_array owns
 * one, and a second "max" one when CONFIG_TRACER_MAX_TRACE is set.
 */
struct trace_buffer {
	struct trace_array		*tr;		/* back-pointer to owning instance */
	struct ring_buffer		*buffer;	/* the actual ring buffer */
	struct trace_array_cpu __percpu	*data;		/* per-CPU state */
	u64				time_start;
	int				cpu;
};
184
185#define TRACE_FLAGS_MAX_SIZE 32
186
187struct trace_options {
188 struct tracer *tracer;
189 struct trace_option_dentry *topts;
190};
191
/*
 * Set of PIDs used to filter which tasks get traced.  @pids appears
 * to be a bitmap with one bit per pid up to @pid_max -- see
 * trace_find_filtered_pid()/trace_pid_write() for the accessors.
 */
struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};
196
197
198
199
200
201
/*
 * The trace array - an array of per-CPU trace arrays.  This is the
 * highest level data structure that individual tracers deal with.
 * Each instance has its own on/off state and its own tracefs
 * directory.
 */
struct trace_array {
	struct list_head	list;		/* link in ftrace_trace_arrays */
	char			*name;		/* instance name */
	struct trace_buffer	trace_buffer;	/* the live buffer */
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer.  When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer,
	 * so the tracing and max pointers are swapped, making the most
	 * recent trace the snapshot / max trace.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;	/* largest latency seen so far */
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock serializes the swapping of buffers when taking a
	 * max snapshot; the buffers themselves are protected per-CPU,
	 * but the act of swapping needs its own lock.  It lives outside
	 * the CONFIG_TRACER_MAX_TRACE block because it is also used in
	 * places other than update_max_tr().
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;	/* non-zero: buffer writes are off */
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;	/* which trace clock this instance uses */
	int			nr_topts;	/* number of entries in topts */
	bool			clear_trace;
	struct tracer		*current_trace;
	unsigned int		trace_flags;	/* TRACE_ITER_* option bits */
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;		/* TRACE_ARRAY_FL_* */
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;		/* open-file reference count */
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
};
277
278enum {
279 TRACE_ARRAY_FL_GLOBAL = (1 << 0)
280};
281
282extern struct list_head ftrace_trace_arrays;
283
284extern struct mutex trace_types_lock;
285
286extern int trace_array_get(struct trace_array *tr);
287extern void trace_array_put(struct trace_array *tr);
288
289
290
291
292
/*
 * The top level (global) trace array is the last entry on the
 * ftrace_trace_arrays list.  Returns NULL if no array has been
 * registered yet.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	/* The top level array must always carry the GLOBAL flag. */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}
305
306#define FTRACE_CMP_TYPE(var, type) \
307 __builtin_types_compatible_p(typeof(var), type *)
308
309#undef IF_ASSIGN
310#define IF_ASSIGN(var, entry, etype, id) \
311 if (FTRACE_CMP_TYPE(var, etype)) { \
312 var = (typeof(var))(entry); \
313 WARN_ON(id && (entry)->type != id); \
314 break; \
315 }
316
317
318extern void __ftrace_bad_type(void);
319
320
321
322
323
324
325
326
327
328
329
330
331
332
/*
 * Usage:
 *    trace_assign_type(var, entry);
 * where "var" is a pointer to a struct that embeds trace_entry as its
 * first member.  Walks the IF_ASSIGN() chain and casts @ent into @var
 * for the first matching type; if no arm matches, the call to
 * __ftrace_bad_type() -- declared above but deliberately never
 * defined -- produces a link-time error, catching entry types added
 * without updating this list.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
355
356
357
358
359
360
361struct tracer_opt {
362 const char *name;
363 u32 bit;
364};
365
366
367
368
369
370struct tracer_flags {
371 u32 val;
372 struct tracer_opt *opts;
373 struct tracer *trace;
374};
375
376
377#define TRACER_OPT(s, b) .name = #s, .bit = b
378
379
380struct trace_option_dentry {
381 struct tracer_opt *opt;
382 struct tracer_flags *flags;
383 struct trace_array *tr;
384 struct dentry *entry;
385};
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
/**
 * struct tracer - a specific tracer and its callbacks to interact
 *                 with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer
 *        (e.g. echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed
 *            (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
/*
 * Bits in current->trace_recursion used to detect and prevent tracer
 * recursion.  Each facility gets a block of four bits -- covering
 * normal, softirq, irq and NMI contexts -- so legitimate nesting of
 * contexts is allowed while real recursion within a single context
 * is caught.  See trace_test_and_set_recursion() below.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,

	/*
	 * Abuse of the trace_recursion.
	 * As we need a way to maintain state if we are tracing the
	 * function graph in irq because we want to trace a particular
	 * function that was called in irq context but we have irq
	 * tracing off.  Since this can only be modified by current,
	 * we can reuse trace_recursion.
	 */
	TRACE_IRQ_BIT,
};
508
509#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
510#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
511#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
512
513#define TRACE_CONTEXT_BITS 4
514
515#define TRACE_FTRACE_START TRACE_FTRACE_BIT
516#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
517
518#define TRACE_LIST_START TRACE_INTERNAL_BIT
519#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
520
521#define TRACE_CONTEXT_MASK TRACE_LIST_MAX
522
523static __always_inline int trace_get_context_bit(void)
524{
525 int bit;
526
527 if (in_interrupt()) {
528 if (in_nmi())
529 bit = 0;
530
531 else if (in_irq())
532 bit = 1;
533 else
534 bit = 2;
535 } else
536 bit = 3;
537
538 return bit;
539}
540
/*
 * Claim the recursion-protection bit for the current context.
 *
 * @start: first bit of this facility's four-bit context window
 * @max:   highest bit of an enclosing facility's window
 *
 * Returns the bit number that was set (to be released later with
 * trace_clear_recursion()), 0 if a bit above @max is already held so
 * an outer check already protects us (trace_clear_recursion() treats
 * 0 as "nothing to clear"), or -1 if the bit for this context is
 * already set, i.e. the tracer has recursed into itself.
 */
static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	/* Keep the protected region from being reordered before the set. */
	barrier();

	return bit;
}
560
/*
 * Release a recursion bit obtained from trace_test_and_set_recursion().
 * A @bit of 0 means the caller never owned a bit (an enclosing check
 * was already active), so there is nothing to clear.
 */
static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	/* Keep the protected region from being reordered past the clear. */
	barrier();
	current->trace_recursion = val;
}
574
575static inline struct ring_buffer_iter *
576trace_buffer_iter(struct trace_iterator *iter, int cpu)
577{
578 if (iter->buffer_iter && iter->buffer_iter[cpu])
579 return iter->buffer_iter[cpu];
580 return NULL;
581}
582
583int tracer_init(struct tracer *t, struct trace_array *tr);
584int tracing_is_enabled(void);
585void tracing_reset(struct trace_buffer *buf, int cpu);
586void tracing_reset_online_cpus(struct trace_buffer *buf);
587void tracing_reset_current(int cpu);
588void tracing_reset_all_online_cpus(void);
589int tracing_open_generic(struct inode *inode, struct file *filp);
590bool tracing_is_disabled(void);
591int tracer_tracing_is_on(struct trace_array *tr);
592void tracer_tracing_on(struct trace_array *tr);
593void tracer_tracing_off(struct trace_array *tr);
594struct dentry *trace_create_file(const char *name,
595 umode_t mode,
596 struct dentry *parent,
597 void *data,
598 const struct file_operations *fops);
599
600struct dentry *tracing_init_dentry(void);
601
602struct ring_buffer_event;
603
604struct ring_buffer_event *
605trace_buffer_lock_reserve(struct ring_buffer *buffer,
606 int type,
607 unsigned long len,
608 unsigned long flags,
609 int pc);
610
611struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
612 struct trace_array_cpu *data);
613
614struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
615 int *ent_cpu, u64 *ent_ts);
616
617void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
618 struct ring_buffer_event *event);
619
620int trace_empty(struct trace_iterator *iter);
621
622void *trace_find_next_entry_inc(struct trace_iterator *iter);
623
624void trace_init_global_iter(struct trace_iterator *iter);
625
626void tracing_iter_reset(struct trace_iterator *iter, int cpu);
627
628void trace_function(struct trace_array *tr,
629 unsigned long ip,
630 unsigned long parent_ip,
631 unsigned long flags, int pc);
632void trace_graph_function(struct trace_array *tr,
633 unsigned long ip,
634 unsigned long parent_ip,
635 unsigned long flags, int pc);
636void trace_latency_header(struct seq_file *m);
637void trace_default_header(struct seq_file *m);
638void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
639int trace_empty(struct trace_iterator *iter);
640
641void trace_graph_return(struct ftrace_graph_ret *trace);
642int trace_graph_entry(struct ftrace_graph_ent *trace);
643void set_graph_array(struct trace_array *tr);
644
645void tracing_start_cmdline_record(void);
646void tracing_stop_cmdline_record(void);
647void tracing_start_tgid_record(void);
648void tracing_stop_tgid_record(void);
649
650int register_tracer(struct tracer *type);
651int is_tracing_stopped(void);
652
653loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
654
655extern cpumask_var_t __read_mostly tracing_buffer_mask;
656
657#define for_each_tracing_cpu(cpu) \
658 for_each_cpu(cpu, tracing_buffer_mask)
659
660extern unsigned long nsecs_to_usecs(unsigned long nsecs);
661
662extern unsigned long tracing_thresh;
663
664
665
666extern int pid_max;
667
668bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
669 pid_t search_pid);
670bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
671 struct task_struct *task);
672void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
673 struct task_struct *self,
674 struct task_struct *task);
675void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
676void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
677int trace_pid_show(struct seq_file *m, void *v);
678void trace_free_pid_list(struct trace_pid_list *pid_list);
679int trace_pid_write(struct trace_pid_list *filtered_pids,
680 struct trace_pid_list **new_pid_list,
681 const char __user *ubuf, size_t cnt);
682
683#ifdef CONFIG_TRACER_MAX_TRACE
684void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
685void update_max_tr_single(struct trace_array *tr,
686 struct task_struct *tsk, int cpu);
687#endif
688
689#ifdef CONFIG_STACKTRACE
690void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
691 int pc);
692
693void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
694 int pc);
695#else
/* Stubs for !CONFIG_STACKTRACE builds: stack recording compiles away. */
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
705#endif
706
707extern u64 ftrace_now(int cpu);
708
709extern void trace_find_cmdline(int pid, char comm[]);
710extern int trace_find_tgid(int pid);
711extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
712
713#ifdef CONFIG_DYNAMIC_FTRACE
714extern unsigned long ftrace_update_tot_cnt;
715void ftrace_init_trace_array(struct trace_array *tr);
716#else
717static inline void ftrace_init_trace_array(struct trace_array *tr) { }
718#endif
719#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
720extern int DYN_FTRACE_TEST_NAME(void);
721#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
722extern int DYN_FTRACE_TEST_NAME2(void);
723
724extern bool ring_buffer_expanded;
725extern bool tracing_selftest_disabled;
726
727#ifdef CONFIG_FTRACE_STARTUP_TEST
728extern int trace_selftest_startup_function(struct tracer *trace,
729 struct trace_array *tr);
730extern int trace_selftest_startup_function_graph(struct tracer *trace,
731 struct trace_array *tr);
732extern int trace_selftest_startup_irqsoff(struct tracer *trace,
733 struct trace_array *tr);
734extern int trace_selftest_startup_preemptoff(struct tracer *trace,
735 struct trace_array *tr);
736extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
737 struct trace_array *tr);
738extern int trace_selftest_startup_wakeup(struct tracer *trace,
739 struct trace_array *tr);
740extern int trace_selftest_startup_nop(struct tracer *trace,
741 struct trace_array *tr);
742extern int trace_selftest_startup_branch(struct tracer *trace,
743 struct trace_array *tr);
744
745
746
747
748
749#define __tracer_data __refdata
750#else
751
752#define __tracer_data __read_mostly
753#endif
754
755extern void *head_page(struct trace_array_cpu *data);
756extern unsigned long long ns2usecs(u64 nsec);
757extern int
758trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
759extern int
760trace_vprintk(unsigned long ip, const char *fmt, va_list args);
761extern int
762trace_array_vprintk(struct trace_array *tr,
763 unsigned long ip, const char *fmt, va_list args);
764int trace_array_printk(struct trace_array *tr,
765 unsigned long ip, const char *fmt, ...);
766int trace_array_printk_buf(struct ring_buffer *buffer,
767 unsigned long ip, const char *fmt, ...);
768void trace_printk_seq(struct trace_seq *s);
769enum print_line_t print_trace_line(struct trace_iterator *iter);
770
771extern char trace_find_mark(unsigned long long duration);
772
773struct ftrace_hash;
774
775struct ftrace_mod_load {
776 struct list_head list;
777 char *func;
778 char *module;
779 int enable;
780};
781
782enum {
783 FTRACE_HASH_FL_MOD = (1 << 0),
784};
785
/*
 * A hash of function addresses used by the function tracer's filter
 * files.  An "empty" hash (see ftrace_hash_empty()) means the filter
 * matches every function.
 */
struct ftrace_hash {
	unsigned long		size_bits;	/* log2 of the bucket count */
	struct hlist_head	*buckets;
	unsigned long		count;		/* number of entries */
	unsigned long		flags;		/* FTRACE_HASH_FL_* */
	struct rcu_head		rcu;
};
793
794struct ftrace_func_entry *
795ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
796
797static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
798{
799 return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
800}
801
802
803#ifdef CONFIG_FUNCTION_GRAPH_TRACER
804
805
806#define TRACE_GRAPH_PRINT_OVERRUN 0x1
807#define TRACE_GRAPH_PRINT_CPU 0x2
808#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
809#define TRACE_GRAPH_PRINT_PROC 0x8
810#define TRACE_GRAPH_PRINT_DURATION 0x10
811#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
812#define TRACE_GRAPH_PRINT_IRQS 0x40
813#define TRACE_GRAPH_PRINT_TAIL 0x80
814#define TRACE_GRAPH_SLEEP_TIME 0x100
815#define TRACE_GRAPH_GRAPH_TIME 0x200
816#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
817#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
818
819extern void ftrace_graph_sleep_time_control(bool enable);
820extern void ftrace_graph_graph_time_control(bool enable);
821
822extern enum print_line_t
823print_graph_function_flags(struct trace_iterator *iter, u32 flags);
824extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
825extern void
826trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
827extern void graph_trace_open(struct trace_iterator *iter);
828extern void graph_trace_close(struct trace_iterator *iter);
829extern int __trace_graph_entry(struct trace_array *tr,
830 struct ftrace_graph_ent *trace,
831 unsigned long flags, int pc);
832extern void __trace_graph_return(struct trace_array *tr,
833 struct ftrace_graph_ret *trace,
834 unsigned long flags, int pc);
835
836#ifdef CONFIG_DYNAMIC_FTRACE
837extern struct ftrace_hash *ftrace_graph_hash;
838extern struct ftrace_hash *ftrace_graph_notrace_hash;
839
/*
 * Return 1 if @addr is selected for function-graph tracing.
 * An empty set_graph_function hash means "trace every function".
 */
static inline int ftrace_graph_addr(unsigned long addr)
{
	int ret = 0;

	/* Preemption disabled while we look at the hash. */
	preempt_disable_notrace();

	if (ftrace_hash_empty(ftrace_graph_hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_irq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}
868
869static inline int ftrace_graph_notrace_addr(unsigned long addr)
870{
871 int ret = 0;
872
873 preempt_disable_notrace();
874
875 if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
876 ret = 1;
877
878 preempt_enable_notrace();
879 return ret;
880}
881#else
/*
 * Without CONFIG_DYNAMIC_FTRACE there are no filter hashes: every
 * function is graph-traced and none is blacklisted.
 */
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
891#endif
892
893extern unsigned int fgraph_max_depth;
894
/*
 * Return true if this function entry must NOT be graph-traced:
 * at depth zero and not selected by the filter, at a negative depth,
 * or past the configured maximum depth.
 */
static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* trace it when it is-nested-in or is a function enabled. */
	return !(trace->depth || ftrace_graph_addr(trace->func)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}
902
903#else
/*
 * Without CONFIG_FUNCTION_GRAPH_TRACER no graph entries are recorded,
 * so tell the output code we did not handle this line.
 */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
909#endif
910
911extern struct list_head ftrace_pids;
912
913#ifdef CONFIG_FUNCTION_TRACER
914struct ftrace_func_command {
915 struct list_head list;
916 char *name;
917 int (*func)(struct trace_array *tr,
918 struct ftrace_hash *hash,
919 char *func, char *cmd,
920 char *params, int enable);
921};
922extern bool ftrace_filter_param __initdata;
/*
 * Returns non-zero when the task running on this CPU is NOT excluded
 * by the function tracer's pid filtering (per-CPU cached verdict).
 */
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
}
927extern int ftrace_is_dead(void);
928int ftrace_create_function_files(struct trace_array *tr,
929 struct dentry *parent);
930void ftrace_destroy_function_files(struct trace_array *tr);
931void ftrace_init_global_array_ops(struct trace_array *tr);
932void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
933void ftrace_reset_array_ops(struct trace_array *tr);
934int using_ftrace_ops_list_func(void);
935void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
936void ftrace_init_tracefs_toplevel(struct trace_array *tr,
937 struct dentry *d_tracer);
938void ftrace_clear_pids(struct trace_array *tr);
939int init_function_trace(void);
940void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
941#else
/* Stubs used when CONFIG_FUNCTION_TRACER is not configured. */
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
962
963#define ftrace_init_array_ops(tr, func) do { } while (0)
964#endif
965
966#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
967
968struct ftrace_probe_ops {
969 void (*func)(unsigned long ip,
970 unsigned long parent_ip,
971 struct trace_array *tr,
972 struct ftrace_probe_ops *ops,
973 void *data);
974 int (*init)(struct ftrace_probe_ops *ops,
975 struct trace_array *tr,
976 unsigned long ip, void *init_data,
977 void **data);
978 void (*free)(struct ftrace_probe_ops *ops,
979 struct trace_array *tr,
980 unsigned long ip, void *data);
981 int (*print)(struct seq_file *m,
982 unsigned long ip,
983 struct ftrace_probe_ops *ops,
984 void *data);
985};
986
987struct ftrace_func_mapper;
988typedef int (*ftrace_mapper_func)(void *data);
989
990struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
991void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
992 unsigned long ip);
993int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
994 unsigned long ip, void *data);
995void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
996 unsigned long ip);
997void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
998 ftrace_mapper_func free_func);
999
1000extern int
1001register_ftrace_function_probe(char *glob, struct trace_array *tr,
1002 struct ftrace_probe_ops *ops, void *data);
1003extern int
1004unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1005 struct ftrace_probe_ops *ops);
1006extern void clear_ftrace_function_probes(struct trace_array *tr);
1007
1008int register_ftrace_command(struct ftrace_func_command *cmd);
1009int unregister_ftrace_command(struct ftrace_func_command *cmd);
1010
1011void ftrace_create_filter_files(struct ftrace_ops *ops,
1012 struct dentry *parent);
1013void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1014#else
1015struct ftrace_func_command;
1016
/*
 * Stub for builds without CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE:
 * commands cannot be registered, so report -EINVAL.
 */
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
1021static inline __init int unregister_ftrace_command(char *cmd_name)
1022{
1023 return -EINVAL;
1024}
/* No probes can exist without dynamic ftrace; nothing to clear. */
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}
1028
1029
1030
1031
1032
1033#define ftrace_create_filter_files(ops, parent) do { } while (0)
1034#define ftrace_destroy_filter_files(ops) do { } while (0)
1035#endif
1036
1037bool ftrace_event_is_function(struct trace_event_call *call);
1038
1039
1040
1041
1042
1043
1044
1045
/*
 * trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};
1052
1053static inline bool trace_parser_loaded(struct trace_parser *parser)
1054{
1055 return (parser->idx != 0);
1056}
1057
/* True while the user's input is incomplete (no terminating space yet). */
static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}
1062
1063static inline void trace_parser_clear(struct trace_parser *parser)
1064{
1065 parser->cont = false;
1066 parser->idx = 0;
1067}
1068
1069extern int trace_parser_get_init(struct trace_parser *parser, int size);
1070extern void trace_parser_put(struct trace_parser *parser);
1071extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1072 size_t cnt, loff_t *ppos);
1073
1074
1075
1076
1077#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1078# define FGRAPH_FLAGS \
1079 C(DISPLAY_GRAPH, "display-graph"),
1080#else
1081# define FGRAPH_FLAGS
1082#endif
1083
1084#ifdef CONFIG_BRANCH_TRACER
1085# define BRANCH_FLAGS \
1086 C(BRANCH, "branch"),
1087#else
1088# define BRANCH_FLAGS
1089#endif
1090
1091#ifdef CONFIG_FUNCTION_TRACER
1092# define FUNCTION_FLAGS \
1093 C(FUNCTION, "function-trace"), \
1094 C(FUNC_FORK, "function-fork"),
1095# define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
1096#else
1097# define FUNCTION_FLAGS
1098# define FUNCTION_DEFAULT_FLAGS 0UL
1099# define TRACE_ITER_FUNC_FORK 0UL
1100#endif
1101
1102#ifdef CONFIG_STACKTRACE
1103# define STACK_FLAGS \
1104 C(STACKTRACE, "stacktrace"),
1105#else
1106# define STACK_FLAGS
1107#endif
1108
1109
1110
1111
1112
1113
1114
1115
1116#define TRACE_FLAGS \
1117 C(PRINT_PARENT, "print-parent"), \
1118 C(SYM_OFFSET, "sym-offset"), \
1119 C(SYM_ADDR, "sym-addr"), \
1120 C(VERBOSE, "verbose"), \
1121 C(RAW, "raw"), \
1122 C(HEX, "hex"), \
1123 C(BIN, "bin"), \
1124 C(BLOCK, "block"), \
1125 C(PRINTK, "trace_printk"), \
1126 C(ANNOTATE, "annotate"), \
1127 C(USERSTACKTRACE, "userstacktrace"), \
1128 C(SYM_USEROBJ, "sym-userobj"), \
1129 C(PRINTK_MSGONLY, "printk-msg-only"), \
1130 C(CONTEXT_INFO, "context-info"), \
1131 C(LATENCY_FMT, "latency-format"), \
1132 C(RECORD_CMD, "record-cmd"), \
1133 C(RECORD_TGID, "record-tgid"), \
1134 C(OVERWRITE, "overwrite"), \
1135 C(STOP_ON_FREE, "disable_on_free"), \
1136 C(IRQ_INFO, "irq-info"), \
1137 C(MARKERS, "markers"), \
1138 C(EVENT_FORK, "event-fork"), \
1139 FUNCTION_FLAGS \
1140 FGRAPH_FLAGS \
1141 STACK_FLAGS \
1142 BRANCH_FLAGS
1143
1144
1145
1146
1147
1148#undef C
1149#define C(a, b) TRACE_ITER_##a##_BIT
1150
1151enum trace_iterator_bits {
1152 TRACE_FLAGS
1153
1154 TRACE_ITER_LAST_BIT
1155};
1156
1157
1158
1159
1160
1161#undef C
1162#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1163
1164enum trace_iterator_flags { TRACE_FLAGS };
1165
1166
1167
1168
1169
1170#define TRACE_ITER_SYM_MASK \
1171 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1172
1173extern struct tracer nop_trace;
1174
1175#ifdef CONFIG_BRANCH_TRACER
1176extern int enable_branch_tracing(struct trace_array *tr);
1177extern void disable_branch_tracing(void);
1178static inline int trace_branch_enable(struct trace_array *tr)
1179{
1180 if (tr->trace_flags & TRACE_ITER_BRANCH)
1181 return enable_branch_tracing(tr);
1182 return 0;
1183}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
1189#else
/* Stubs when CONFIG_BRANCH_TRACER is not configured: both are no-ops. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
1197#endif
1198
1199
1200int tracing_update_buffers(void);
1201
1202struct ftrace_event_field {
1203 struct list_head link;
1204 const char *name;
1205 const char *type;
1206 int filter_type;
1207 int offset;
1208 int size;
1209 int is_signed;
1210};
1211
1212struct event_filter {
1213 int n_preds;
1214 int a_preds;
1215 struct filter_pred __rcu *preds;
1216 struct filter_pred __rcu *root;
1217 char *filter_string;
1218};
1219
1220struct event_subsystem {
1221 struct list_head list;
1222 const char *name;
1223 struct event_filter *filter;
1224 int ref_count;
1225};
1226
1227struct trace_subsystem_dir {
1228 struct list_head list;
1229 struct event_subsystem *subsystem;
1230 struct trace_array *tr;
1231 struct dentry *entry;
1232 int ref_count;
1233 int nr_events;
1234};
1235
1236extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1237 struct ring_buffer *buffer,
1238 struct ring_buffer_event *event);
1239
1240void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1241 struct ring_buffer *buffer,
1242 struct ring_buffer_event *event,
1243 unsigned long flags, int pc,
1244 struct pt_regs *regs);
1245
/*
 * Commit an event without saved register state: forwards to
 * trace_buffer_unlock_commit_regs() with a NULL pt_regs.
 */
static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}
1253
1254DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1255DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1256void trace_buffered_event_disable(void);
1257void trace_buffered_event_enable(void);
1258
/*
 * Discard an event that was reserved but must not be committed.
 * If the event still lives in this CPU's temporary buffered event,
 * discarding is just dropping the use count; otherwise the ring
 * buffer is told to discard the reserved commit.
 */
static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
/*
 * Helper for event_trigger_unlock_commit{_regs}().
 *
 * If the event file has conditional triggers, call them first and save
 * the resulting trigger types in *tt so the caller can invoke the
 * post-commit trigger handling.
 *
 * Returns true if the event is to be discarded — either because the
 * event is soft disabled or because it fails the event's filter — in
 * which case the reserved ring buffer entry is discarded here as well.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
 struct ring_buffer *buffer,
 struct ring_buffer_event *event,
 void *entry,
 enum event_trigger_type *tt)
{
 unsigned long eflags = file->flags;

 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *tt = event_triggers_call(file, entry);

 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
 !filter_match_preds(file->filter, entry))) {
 __trace_event_discard_commit(buffer, event);
 return true;
 }

 return false;
}
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event
 *
 * Tests the event against its filters and soft-disable state; commits
 * the reserved entry only if it is not discarded, then invokes any
 * triggers that must run after the record (post-call triggers).
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
 struct ring_buffer *buffer,
 struct ring_buffer_event *event,
 void *entry, unsigned long irq_flags, int pc)
{
 enum event_trigger_type tt = ETT_NONE;

 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
 trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

 if (tt)
 event_triggers_post_call(file, tt, entry);
}
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
/**
 * event_trigger_unlock_commit_regs - as event_trigger_unlock_commit()
 * but with saved registers
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event
 * @regs: The registers saved at the time of the event, passed through
 *	to the commit (may be used for stack traces etc.)
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
 struct ring_buffer *buffer,
 struct ring_buffer_event *event,
 void *entry, unsigned long irq_flags, int pc,
 struct pt_regs *regs)
{
 enum event_trigger_type tt = ETT_NONE;

 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
 trace_buffer_unlock_commit_regs(file->tr, buffer, event,
 irq_flags, pc, regs);

 if (tt)
 event_triggers_post_call(file, tt, entry);
}
1366
/* Sentinel for "no predicate" in the unsigned short index fields below. */
#define FILTER_PRED_INVALID ((unsigned short)-1)
/*
 * NOTE(review): IS_RIGHT and FOLD deliberately share bit 15 — they are
 * flags set on *different* unsigned short members of struct filter_pred
 * (tree-walk state vs. fold state); confirm in trace_events_filter.c.
 */
#define FILTER_PRED_IS_RIGHT (1 << 15)
#define FILTER_PRED_FOLD (1 << 15)

/*
 * The maximum number of predicates is bounded by the size of the
 * unsigned short index fields minus the flag bit at the top:
 * 16384 == 1 << 14.
 */
#define MAX_FILTER_PRED 16384
1379
1380struct filter_pred;
1381struct regex;
1382
1383typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1384
1385typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1386
/*
 * How a filter pattern matches a string field; the variant is chosen
 * by filter_parse_regex() from the '*' placement in the pattern.
 */
enum regex_type {
 MATCH_FULL = 0, /* pattern must match the whole string */
 MATCH_FRONT_ONLY, /* match at the start of the string */
 MATCH_MIDDLE_ONLY, /* match anywhere within the string */
 MATCH_END_ONLY, /* match at the end of the string */
 MATCH_GLOB, /* full glob matching (linux/glob.h) */
};
1394
/* State for matching a filter pattern against a string field. */
struct regex {
 char pattern[MAX_FILTER_STR_VAL]; /* pattern text */
 int len; /* length of the pattern */
 int field_len; /* NOTE(review): presumably the matched field's length — confirm in trace_events_filter.c */
 regex_match_func match; /* match function, per enum regex_type */
};
1401
/* One compiled filter predicate — a node in an event_filter's tree. */
struct filter_pred {
 filter_pred_fn_t fn; /* evaluation callback for this predicate */
 u64 val; /* constant compared against (numeric preds) */
 struct regex regex; /* pattern state (string preds) */
 unsigned short *ops; /* NOTE(review): per-predicate op data — confirm use in trace_events_filter.c */
 struct ftrace_event_field *field; /* event field this predicate tests */
 int offset; /* byte offset of the tested data in the record */
 int not; /* non-zero inverts the match result */
 int op; /* comparison operator */
 unsigned short index; /* this predicate's slot in the preds array */
 unsigned short parent; /* parent index; may carry FILTER_PRED_IS_RIGHT */
 unsigned short left; /* left child index, or FILTER_PRED_INVALID */
 unsigned short right; /* right child index, or FILTER_PRED_INVALID */
};
1416
1417static inline bool is_string_field(struct ftrace_event_field *field)
1418{
1419 return field->filter_type == FILTER_DYN_STRING ||
1420 field->filter_type == FILTER_STATIC_STRING ||
1421 field->filter_type == FILTER_PTR_STRING ||
1422 field->filter_type == FILTER_COMM;
1423}
1424
1425static inline bool is_function_field(struct ftrace_event_field *field)
1426{
1427 return field->filter_type == FILTER_TRACE_FN;
1428}
1429
1430extern enum regex_type
1431filter_parse_regex(char *buff, int len, char **search, int *not);
1432extern void print_event_filter(struct trace_event_file *file,
1433 struct trace_seq *s);
1434extern int apply_event_filter(struct trace_event_file *file,
1435 char *filter_string);
1436extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1437 char *filter_string);
1438extern void print_subsystem_event_filter(struct event_subsystem *system,
1439 struct trace_seq *s);
1440extern int filter_assign_type(const char *type);
1441extern int create_event_filter(struct trace_event_call *call,
1442 char *filter_str, bool set_str,
1443 struct event_filter **filterp);
1444extern void free_event_filter(struct event_filter *filter);
1445
1446struct ftrace_event_field *
1447trace_find_event_field(struct trace_event_call *call, char *name);
1448
1449extern void trace_event_enable_cmd_record(bool enable);
1450extern void trace_event_enable_tgid_record(bool enable);
1451
1452extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1453extern int event_trace_del_tracer(struct trace_array *tr);
1454
1455extern struct trace_event_file *find_event_file(struct trace_array *tr,
1456 const char *system,
1457 const char *event);
1458
1459static inline void *event_file_data(struct file *filp)
1460{
1461 return READ_ONCE(file_inode(filp)->i_private);
1462}
1463
1464extern struct mutex event_mutex;
1465extern struct list_head ftrace_events;
1466
1467extern const struct file_operations event_trigger_fops;
1468extern const struct file_operations event_hist_fops;
1469
#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
/* Histogram triggers not configured: registration is a successful no-op. */
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif
1477
1478extern int register_trigger_cmds(void);
1479extern void clear_event_triggers(struct trace_array *tr);
1480
/* Per-instance state of a registered event trigger. */
struct event_trigger_data {
 unsigned long count; /* remaining invocations (":count" parameter) — 0 presumably means unlimited; confirm */
 int ref; /* reference count */
 struct event_trigger_ops *ops; /* trigger callbacks */
 struct event_command *cmd_ops; /* command that created this trigger */
 struct event_filter __rcu *filter; /* optional trigger filter */
 char *filter_str; /* filter text, for display */
 void *private_data; /* command-private state */
 bool paused; /* trigger is currently paused */
 bool paused_tmp; /* saved pause state (pause/unpause_named_trigger) */
 struct list_head list; /* entry in the event file's trigger list */
 /* Fields below are for named triggers (see find_named_trigger() etc.) */
 char *name; /* trigger name, NULL if unnamed */
 struct list_head named_list; /* entry in the list of same-named triggers */
 struct event_trigger_data *named_data; /* shared data of the named master */
};
1496
1497
1498#define ENABLE_EVENT_STR "enable_event"
1499#define DISABLE_EVENT_STR "disable_event"
1500#define ENABLE_HIST_STR "enable_hist"
1501#define DISABLE_HIST_STR "disable_hist"
1502
/* Private data for the enable_event/disable_event (and _hist) triggers. */
struct enable_trigger_data {
 struct trace_event_file *file; /* the event file to enable/disable */
 bool enable; /* true: enable, false: disable */
 bool hist; /* operate on hist triggers instead of the event */
};
1508
1509extern int event_enable_trigger_print(struct seq_file *m,
1510 struct event_trigger_ops *ops,
1511 struct event_trigger_data *data);
1512extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1513 struct event_trigger_data *data);
1514extern int event_enable_trigger_func(struct event_command *cmd_ops,
1515 struct trace_event_file *file,
1516 char *glob, char *cmd, char *param);
1517extern int event_enable_register_trigger(char *glob,
1518 struct event_trigger_ops *ops,
1519 struct event_trigger_data *data,
1520 struct trace_event_file *file);
1521extern void event_enable_unregister_trigger(char *glob,
1522 struct event_trigger_ops *ops,
1523 struct event_trigger_data *test,
1524 struct trace_event_file *file);
1525extern void trigger_data_free(struct event_trigger_data *data);
1526extern int event_trigger_init(struct event_trigger_ops *ops,
1527 struct event_trigger_data *data);
1528extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1529 int trigger_enable);
1530extern void update_cond_flag(struct trace_event_file *file);
1531extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
1532 struct event_trigger_data *test,
1533 struct trace_event_file *file);
1534extern int set_trigger_filter(char *filter_str,
1535 struct event_trigger_data *trigger_data,
1536 struct trace_event_file *file);
1537extern struct event_trigger_data *find_named_trigger(const char *name);
1538extern bool is_named_trigger(struct event_trigger_data *test);
1539extern int save_named_trigger(const char *name,
1540 struct event_trigger_data *data);
1541extern void del_named_trigger(struct event_trigger_data *data);
1542extern void pause_named_trigger(struct event_trigger_data *data);
1543extern void unpause_named_trigger(struct event_trigger_data *data);
1544extern void set_named_trigger_data(struct event_trigger_data *data,
1545 struct event_trigger_data *named_data);
1546extern int register_event_command(struct event_command *cmd);
1547extern int unregister_event_command(struct event_command *cmd);
1548extern int register_trigger_hist_enable_disable_cmds(void);
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-instance hooks for the
 * various trigger operations.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs; @rec is the trace record.
 *
 * @init: Optional initialization function, called when the trigger
 *	is registered (see event_trigger_init()).
 *
 * @free: Optional de-initialization function, called when the trigger
 *	is unregistered (see event_enable_trigger_free()).
 *
 * @print: Callback invoked to have the trigger print itself to the
 *	given seq_file (used by the 'trigger' file).
 */
struct event_trigger_ops {
 void (*func)(struct event_trigger_data *data,
 void *rec);
 int (*init)(struct event_trigger_ops *ops,
 struct event_trigger_data *data);
 void (*free)(struct event_trigger_ops *ops,
 struct event_trigger_data *data);
 int (*print)(struct seq_file *m,
 struct event_trigger_ops *ops,
 struct event_trigger_data *data);
};
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into an event's 'trigger' file; each registered command implements
 * one trigger type.
 *
 * @list: List head used to link registered commands.
 * @name: The command name, as written to the 'trigger' file.
 * @trigger_type: The (unique) event_trigger_type of this command.
 * @flags: See enum event_command_flags below.
 * @func: Parses the command string and registers/unregisters a
 *	trigger instance accordingly.
 * @reg: Registers a trigger instance with the given event file.
 * @unreg: Removes a trigger instance from the given event file.
 * @unreg_all: Optional; removes all of this command's trigger
 *	instances from the given event file.
 * @set_filter: Optional; associates a filter string with a trigger
 *	instance (see set_trigger_filter()).
 * @get_trigger_ops: Returns the event_trigger_ops to use for the
 *	given command/parameter string.
 */
struct event_command {
 struct list_head list;
 char *name;
 enum event_trigger_type trigger_type;
 int flags;
 int (*func)(struct event_command *cmd_ops,
 struct trace_event_file *file,
 char *glob, char *cmd, char *params);
 int (*reg)(char *glob,
 struct event_trigger_ops *ops,
 struct event_trigger_data *data,
 struct trace_event_file *file);
 void (*unreg)(char *glob,
 struct event_trigger_ops *ops,
 struct event_trigger_data *data,
 struct trace_event_file *file);
 void (*unreg_all)(struct trace_event_file *file);
 int (*set_filter)(char *filter_str,
 struct event_trigger_data *data,
 struct trace_event_file *file);
 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
/**
 * enum event_command_flags - flags for struct event_command
 *
 * @EVENT_CMD_FL_POST_TRIGGER: The command's trigger must be invoked
 *	after the trace record has been committed rather than before
 *	(tested by event_command_post_trigger() below).
 * @EVENT_CMD_FL_NEEDS_REC: The command needs access to the trace
 *	record in order to perform its function, regardless of whether
 *	or not it has a filter associated with it.
 */
enum event_command_flags {
 EVENT_CMD_FL_POST_TRIGGER = 1,
 EVENT_CMD_FL_NEEDS_REC = 2,
};
1729
1730static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1731{
1732 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1733}
1734
1735static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1736{
1737 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1738}
1739
1740extern int trace_event_enable_disable(struct trace_event_file *file,
1741 int enable, int soft_disable);
1742extern int tracing_alloc_snapshot(void);
1743
1744extern const char *__start___trace_bprintk_fmt[];
1745extern const char *__stop___trace_bprintk_fmt[];
1746
1747extern const char *__start___tracepoint_str[];
1748extern const char *__stop___tracepoint_str[];
1749
1750void trace_printk_control(bool enabled);
1751void trace_printk_init_buffers(void);
1752void trace_printk_start_comm(void);
1753int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1754int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1755
1756#define MAX_EVENT_NAME_LEN 64
1757
1758extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1759extern ssize_t trace_parse_run_command(struct file *file,
1760 const char __user *buffer, size_t count, loff_t *ppos,
1761 int (*createfn)(int, char**));
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
/* trace_puts() variant for use within the tracing code itself. */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

/*
 * Redefine the FTRACE_ENTRY() macro family so that the inclusion of
 * trace_entries.h below emits an extern declaration of the
 * trace_event_call (event_##call) for every static trace entry type.
 */
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
 extern struct trace_event_call \
 __aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
 filter)
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
 filter)
1786
1787#include "trace_entries.h"
1788
1789#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1790int perf_ftrace_event_register(struct trace_event_call *call,
1791 enum trace_reg type, void *data);
1792#else
1793#define perf_ftrace_event_register NULL
1794#endif
1795
#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
/* Syscall tracing not configured: init is a no-op, no names are known. */
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
 return NULL;
}
#endif
1806
#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
#else
/* Event tracing not configured: both hooks become no-ops. */
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif
1814
1815extern struct trace_iterator *tracepoint_print_iter;
1816
1817#endif
1818