1
2
3#ifndef _LINUX_KERNEL_TRACE_H
4#define _LINUX_KERNEL_TRACE_H
5
6#include <linux/fs.h>
7#include <linux/atomic.h>
8#include <linux/sched.h>
9#include <linux/clocksource.h>
10#include <linux/ring_buffer.h>
11#include <linux/mmiotrace.h>
12#include <linux/tracepoint.h>
13#include <linux/ftrace.h>
14#include <linux/trace.h>
15#include <linux/hw_breakpoint.h>
16#include <linux/trace_seq.h>
17#include <linux/trace_events.h>
18#include <linux/compiler.h>
19#include <linux/glob.h>
20#include <linux/irq_work.h>
21#include <linux/workqueue.h>
22#include <linux/ctype.h>
23#include <linux/once_lite.h>
24
25#ifdef CONFIG_FTRACE_SYSCALLS
26#include <asm/unistd.h>
27#include <asm/syscall.h>
28#endif
29
/*
 * Type ids for the entries a tracer can store in the ring buffer.
 * trace_assign_type() below maps each id to its concrete entry struct.
 * __TRACE_FIRST_TYPE/__TRACE_LAST_TYPE bracket the valid range.
 */
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_OSNOISE,
	TRACE_TIMERLAT,
	TRACE_RAW_DATA,
	TRACE_FUNC_REPEATS,

	__TRACE_LAST_TYPE,
};
55
56
/*
 * The following #undef/#define pairs re-purpose the field macros used
 * by trace_entries.h so that the #include below expands every
 * FTRACE_ENTRY() invocation into a plain C struct definition: a
 * struct trace_entry header followed by the event's declared fields.
 */
#undef __field
#define __field(type, item)		type	item;

#undef __field_fn
#define __field_fn(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

/* "desc"/"packed" fields live inside a nested container struct, so they
 * expand to nothing at this level. */
#undef __field_desc
#define __field_desc(type, container, item)

#undef __field_packed
#define __field_packed(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

/* Variable-length trailing data: C99 flexible array member. */
#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

/* Duplicate entries share a struct already emitted elsewhere. */
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
101
102#include "trace_entries.h"
103
104
/*
 * MEM_FAIL() - report a (typically memory-allocation) failure, printing
 * the pr_err() message only once per call site (DO_ONCE_LITE_IF).
 * NOTE(review): DO_ONCE_LITE_IF appears to evaluate to @condition so
 * this can gate an if () -- confirm against <linux/once_lite.h>.
 */
#define MEM_FAIL(condition, fmt, ...)					\
	DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
107
108
109
110
111
/* Ring-buffer entry for a syscall-entry event (CONFIG_FTRACE_SYSCALLS). */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;	/* syscall number */
	unsigned long		args[];	/* syscall arguments, flexible array */
};
117
/* Ring-buffer entry for a syscall-exit event (CONFIG_FTRACE_SYSCALLS). */
struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;	/* syscall number */
	long			ret;	/* syscall return value */
};
123
/* Common header of a kprobe trace event: the probed instruction address. */
struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};
128
/* Common header of an eprobe (event probe) trace event. */
struct eprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned int		type;	/* NOTE(review): looks like the attached event's type id -- confirm */
};
133
/* Common header of a kretprobe event: probed function and return address. */
struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};
139
140#define TRACE_BUF_SIZE 1024
141
142struct trace_array;
143
144
145
146
147
148
/*
 * Per-CPU state of a trace buffer: the per-CPU disable counter plus
 * the saved context of the task that produced the current max-latency
 * snapshot.  NOTE(review): the latency fields appear to be filled when
 * a new max latency is recorded -- confirm against update_max_tr().
 */
struct trace_array_cpu {
	atomic_t		disabled;	/* non-zero: don't record on this CPU */
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
	int			ftrace_ignore_pid;	/* FTRACE_PID_* state, read by ftrace_trace_task() */
#endif
	bool			ignore_pid;
};
172
173struct tracer;
174struct trace_option_dentry;
175
/*
 * One ring buffer plus its per-CPU bookkeeping.  A trace_array owns
 * one, and a second "max" copy when CONFIG_TRACER_MAX_TRACE is set.
 */
struct array_buffer {
	struct trace_array		*tr;	/* back-pointer to the owning trace array */
	struct trace_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};
183
184#define TRACE_FLAGS_MAX_SIZE 32
185
/* Pairs a tracer with the tracefs option entries created for it. */
struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};
190
/*
 * Set of pids used for trace filtering.
 * NOTE(review): @pids looks like a bitmap indexed by pid and sized by
 * @pid_max -- confirm in trace_pid_write()/trace_find_filtered_pid().
 */
struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};
195
/* Bits selecting which pid list(s) an operation applies to. */
enum {
	TRACE_PIDS		= BIT(0),	/* the "filtered_pids" (trace only these) list */
	TRACE_NO_PIDS		= BIT(1),	/* the "filtered_no_pids" (never trace these) list */
};
200
201static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
202 struct trace_pid_list *no_pid_list)
203{
204
205 return ((type & TRACE_PIDS) && pid_list) ||
206 ((type & TRACE_NO_PIDS) && no_pid_list);
207}
208
209static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
210 struct trace_pid_list *no_pid_list)
211{
212
213
214
215
216 return (!(type & TRACE_PIDS) && pid_list) ||
217 (!(type & TRACE_NO_PIDS) && no_pid_list);
218}
219
220typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
/**
 * struct cond_snapshot - conditional snapshot state
 * @cond_data: opaque user data handed back to @update
 * @update: cond_update_fn_t callback that decides whether a conditional
 *	snapshot should be taken (bool return; presumably true means
 *	"take the snapshot" -- confirm at the call site in trace.c)
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};
264
265
266
267
268
/*
 * struct trace_func_repeats - used to keep track of consecutive (on
 * the same CPU) calls of a single function, so repeats can be folded
 * into one TRACE_FUNC_REPEATS entry (see trace_last_func_repeats()).
 */
struct trace_func_repeats {
	unsigned long	ip;
	unsigned long	parent_ip;
	unsigned long	count;		/* number of consecutive repeats seen */
	u64		ts_last_call;	/* timestamp of the most recent call */
};
275
276
277
278
279
280
/*
 * The trace array - the highest level data structure that individual
 * tracers deal with.  Each tracing instance is one trace_array, linked
 * on ftrace_trace_arrays; it owns the ring buffer(s), pid filters,
 * option state and the tracefs directory entries.
 */
struct trace_array {
	struct list_head	list;		/* entry on ftrace_trace_arrays */
	char			*name;		/* instance name */
	struct array_buffer	array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * array_buffer.  When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the array_buffer
	 * and the buffers are reset for the array_buffer so tracing
	 * can continue.
	 */
	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
	|| defined(CONFIG_OSNOISE_TRACER)
	unsigned long		max_latency;
#ifdef CONFIG_FSNOTIFY
	/* deferred work used to notify user space of max_latency updates */
	struct dentry		*d_max_latency;
	struct work_struct	fsnotify_work;
	struct irq_work		fsnotify_irqwork;
#endif
#endif
	struct trace_pid_list	__rcu *filtered_pids;		/* only trace these pids */
	struct trace_pid_list	__rcu *filtered_no_pids;	/* never trace these pids */
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot.  The buffers themselves are
	 * protected by per_cpu spinlocks.  But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as a arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside the update_max_tr
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;	/* TRACE_ITER_* option bits */
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;		/* TRACE_ARRAY_FL_* */
	raw_spinlock_t		start_lock;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
	int			trace_ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	struct trace_pid_list	__rcu *function_no_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			no_filter_buffering_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
	struct trace_func_repeats	__percpu *last_func_repeats;
};
375
/* trace_array::flags bits. */
enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)	/* this is the top-level (global) trace array */
};
379
380extern struct list_head ftrace_trace_arrays;
381
382extern struct mutex trace_types_lock;
383
384extern int trace_array_get(struct trace_array *tr);
385extern int tracing_check_open_get_tr(struct trace_array *tr);
386extern struct trace_array *trace_array_find(const char *instance);
387extern struct trace_array *trace_array_find_get(const char *instance);
388
389extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
390extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
391extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
392
393extern bool trace_clock_in_ns(struct trace_array *tr);
394
395
396
397
398
/*
 * Return the top-level (global) trace array, or NULL if none has been
 * registered yet.  The global array is fetched from the tail of
 * ftrace_trace_arrays (presumably instances are added at the head --
 * confirm in trace.c); WARN if the flag says it isn't the global one.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}
411
/* True if @var's pointee type is exactly @type. */
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

/*
 * Helper for trace_assign_type(): if @var points at an @etype, cast
 * @entry into @var, WARN when the entry's runtime type id doesn't
 * match @id (id == 0 means "don't check"), and break out of the
 * enclosing do-while.
 */
#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id != 0 && (entry)->type != id); \
		break;					\
	}
422
423
424extern void __ftrace_bad_type(void);
425
426
427
428
429
430
431
432
433
434
435
436
437
438
/*
 * trace_assign_type() - assign the generic entry pointer @ent to the
 * typed pointer @var, based solely on @var's static type, with a
 * WARN_ON sanity check that the entry's type id matches.  If @var's
 * type is not listed here the chain falls through to
 * __ftrace_bad_type(), which has no definition, so an unhandled type
 * shows up as a link-time error.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
		IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct func_repeats_entry,		\
			  TRACE_FUNC_REPEATS);				\
		__ftrace_bad_type();					\
	} while (0)
465
466
467
468
469
470
/*
 * One tracer-private option: @name appears in the trace_options file,
 * @bit is the flag it controls in tracer_flags::val.  Usually built
 * with the TRACER_OPT() initializer below.
 */
struct tracer_opt {
	const char	*name;
	u32		bit;
};
475
476
477
478
479
/*
 * The current value (@val) of a tracer's private option bits plus the
 * tracer_opt array (@opts) that defines them.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;		/* tracer these flags belong to */
};
485
486
487#define TRACER_OPT(s, b) .name = #s, .bit = b
488
489
/* tracefs file created for one tracer option in one trace instance. */
struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused
 * @stop: called when tracing is paused
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (CONFIG_FTRACE_STARTUP_TEST)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a line from the trace
 * @set_flag: signals one of your private flags changed
 * @flag_changed: your tracer is asked before a flag is changed
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;		/* registered-tracer list link */
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled by a kernel boot param */
	bool			noboot;
};
560
561static inline struct ring_buffer_iter *
562trace_buffer_iter(struct trace_iterator *iter, int cpu)
563{
564 return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
565}
566
567int tracer_init(struct tracer *t, struct trace_array *tr);
568int tracing_is_enabled(void);
569void tracing_reset_online_cpus(struct array_buffer *buf);
570void tracing_reset_current(int cpu);
571void tracing_reset_all_online_cpus(void);
572int tracing_open_generic(struct inode *inode, struct file *filp);
573int tracing_open_generic_tr(struct inode *inode, struct file *filp);
574bool tracing_is_disabled(void);
575bool tracer_tracing_is_on(struct trace_array *tr);
576void tracer_tracing_on(struct trace_array *tr);
577void tracer_tracing_off(struct trace_array *tr);
578struct dentry *trace_create_file(const char *name,
579 umode_t mode,
580 struct dentry *parent,
581 void *data,
582 const struct file_operations *fops);
583
584int tracing_init_dentry(void);
585
586struct ring_buffer_event;
587
588struct ring_buffer_event *
589trace_buffer_lock_reserve(struct trace_buffer *buffer,
590 int type,
591 unsigned long len,
592 unsigned int trace_ctx);
593
594struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
595 struct trace_array_cpu *data);
596
597struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
598 int *ent_cpu, u64 *ent_ts);
599
600void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
601 struct ring_buffer_event *event);
602
603bool trace_is_tracepoint_string(const char *str);
604const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
605void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
606 va_list ap);
607
608int trace_empty(struct trace_iterator *iter);
609
610void *trace_find_next_entry_inc(struct trace_iterator *iter);
611
612void trace_init_global_iter(struct trace_iterator *iter);
613
614void tracing_iter_reset(struct trace_iterator *iter, int cpu);
615
616unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
617unsigned long trace_total_entries(struct trace_array *tr);
618
619void trace_function(struct trace_array *tr,
620 unsigned long ip,
621 unsigned long parent_ip,
622 unsigned int trace_ctx);
623void trace_graph_function(struct trace_array *tr,
624 unsigned long ip,
625 unsigned long parent_ip,
626 unsigned int trace_ctx);
627void trace_latency_header(struct seq_file *m);
628void trace_default_header(struct seq_file *m);
629void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
630
631void trace_graph_return(struct ftrace_graph_ret *trace);
632int trace_graph_entry(struct ftrace_graph_ent *trace);
633void set_graph_array(struct trace_array *tr);
634
635void tracing_start_cmdline_record(void);
636void tracing_stop_cmdline_record(void);
637void tracing_start_tgid_record(void);
638void tracing_stop_tgid_record(void);
639
640int register_tracer(struct tracer *type);
641int is_tracing_stopped(void);
642
643loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
644
645extern cpumask_var_t __read_mostly tracing_buffer_mask;
646
647#define for_each_tracing_cpu(cpu) \
648 for_each_cpu(cpu, tracing_buffer_mask)
649
650extern unsigned long nsecs_to_usecs(unsigned long nsecs);
651
652extern unsigned long tracing_thresh;
653
654
655
656extern int pid_max;
657
658bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
659 pid_t search_pid);
660bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
661 struct trace_pid_list *filtered_no_pids,
662 struct task_struct *task);
663void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
664 struct task_struct *self,
665 struct task_struct *task);
666void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
667void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
668int trace_pid_show(struct seq_file *m, void *v);
669void trace_free_pid_list(struct trace_pid_list *pid_list);
670int trace_pid_write(struct trace_pid_list *filtered_pids,
671 struct trace_pid_list **new_pid_list,
672 const char __user *ubuf, size_t cnt);
673
674#ifdef CONFIG_TRACER_MAX_TRACE
675void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
676 void *cond_data);
677void update_max_tr_single(struct trace_array *tr,
678 struct task_struct *tsk, int cpu);
679#endif
680
681#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
682 || defined(CONFIG_OSNOISE_TRACER)) && defined(CONFIG_FSNOTIFY)
683#define LATENCY_FS_NOTIFY
684#endif
685
686#ifdef LATENCY_FS_NOTIFY
687void latency_fsnotify(struct trace_array *tr);
688#else
689static inline void latency_fsnotify(struct trace_array *tr) { }
690#endif
691
692#ifdef CONFIG_STACKTRACE
693void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
694#else
695static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
696 int skip)
697{
698}
699#endif
700
701void trace_last_func_repeats(struct trace_array *tr,
702 struct trace_func_repeats *last_info,
703 unsigned int trace_ctx);
704
705extern u64 ftrace_now(int cpu);
706
707extern void trace_find_cmdline(int pid, char comm[]);
708extern int trace_find_tgid(int pid);
709extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
710
711#ifdef CONFIG_DYNAMIC_FTRACE
712extern unsigned long ftrace_update_tot_cnt;
713extern unsigned long ftrace_number_of_pages;
714extern unsigned long ftrace_number_of_groups;
715void ftrace_init_trace_array(struct trace_array *tr);
716#else
717static inline void ftrace_init_trace_array(struct trace_array *tr) { }
718#endif
719#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
720extern int DYN_FTRACE_TEST_NAME(void);
721#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
722extern int DYN_FTRACE_TEST_NAME2(void);
723
724extern bool ring_buffer_expanded;
725extern bool tracing_selftest_disabled;
726
727#ifdef CONFIG_FTRACE_STARTUP_TEST
728extern void __init disable_tracing_selftest(const char *reason);
729
730extern int trace_selftest_startup_function(struct tracer *trace,
731 struct trace_array *tr);
732extern int trace_selftest_startup_function_graph(struct tracer *trace,
733 struct trace_array *tr);
734extern int trace_selftest_startup_irqsoff(struct tracer *trace,
735 struct trace_array *tr);
736extern int trace_selftest_startup_preemptoff(struct tracer *trace,
737 struct trace_array *tr);
738extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
739 struct trace_array *tr);
740extern int trace_selftest_startup_wakeup(struct tracer *trace,
741 struct trace_array *tr);
742extern int trace_selftest_startup_nop(struct tracer *trace,
743 struct trace_array *tr);
744extern int trace_selftest_startup_branch(struct tracer *trace,
745 struct trace_array *tr);
746
747
748
749
750
751#define __tracer_data __refdata
752#else
753static inline void __init disable_tracing_selftest(const char *reason)
754{
755}
756
757#define __tracer_data __read_mostly
758#endif
759
760extern void *head_page(struct trace_array_cpu *data);
761extern unsigned long long ns2usecs(u64 nsec);
762extern int
763trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
764extern int
765trace_vprintk(unsigned long ip, const char *fmt, va_list args);
766extern int
767trace_array_vprintk(struct trace_array *tr,
768 unsigned long ip, const char *fmt, va_list args);
769int trace_array_printk_buf(struct trace_buffer *buffer,
770 unsigned long ip, const char *fmt, ...);
771void trace_printk_seq(struct trace_seq *s);
772enum print_line_t print_trace_line(struct trace_iterator *iter);
773
774extern char trace_find_mark(unsigned long long duration);
775
776struct ftrace_hash;
777
/*
 * A saved "function:module" filter entry waiting for @module to load.
 * NOTE(review): @enable presumably distinguishes filter vs notrace --
 * confirm in ftrace.c's module-filter handling.
 */
struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			 enable;
};
784
/* ftrace_hash::flags bits. */
enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),	/* hash carries module ("mod") entries -- see ftrace_hash_empty() */
};
788
/*
 * Hash table of function addresses used for the filter/notrace sets.
 * An empty hash (see ftrace_hash_empty()) means "match everything"
 * for graph filtering, per ftrace_graph_addr() below.
 */
struct ftrace_hash {
	unsigned long		size_bits;	/* log2 of the bucket count */
	struct hlist_head	*buckets;
	unsigned long		count;		/* number of entries */
	unsigned long		flags;		/* FTRACE_HASH_FL_* */
	struct rcu_head		rcu;		/* for RCU-deferred freeing */
};
796
797struct ftrace_func_entry *
798ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
799
800static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
801{
802 return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
803}
804
805
806#ifdef CONFIG_FUNCTION_GRAPH_TRACER
807
808
809#define TRACE_GRAPH_PRINT_OVERRUN 0x1
810#define TRACE_GRAPH_PRINT_CPU 0x2
811#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
812#define TRACE_GRAPH_PRINT_PROC 0x8
813#define TRACE_GRAPH_PRINT_DURATION 0x10
814#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
815#define TRACE_GRAPH_PRINT_REL_TIME 0x40
816#define TRACE_GRAPH_PRINT_IRQS 0x80
817#define TRACE_GRAPH_PRINT_TAIL 0x100
818#define TRACE_GRAPH_SLEEP_TIME 0x200
819#define TRACE_GRAPH_GRAPH_TIME 0x400
820#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
821#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
822
823extern void ftrace_graph_sleep_time_control(bool enable);
824
825#ifdef CONFIG_FUNCTION_PROFILER
826extern void ftrace_graph_graph_time_control(bool enable);
827#else
828static inline void ftrace_graph_graph_time_control(bool enable) { }
829#endif
830
831extern enum print_line_t
832print_graph_function_flags(struct trace_iterator *iter, u32 flags);
833extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
834extern void
835trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
836extern void graph_trace_open(struct trace_iterator *iter);
837extern void graph_trace_close(struct trace_iterator *iter);
838extern int __trace_graph_entry(struct trace_array *tr,
839 struct ftrace_graph_ent *trace,
840 unsigned int trace_ctx);
841extern void __trace_graph_return(struct trace_array *tr,
842 struct ftrace_graph_ret *trace,
843 unsigned int trace_ctx);
844
845#ifdef CONFIG_DYNAMIC_FTRACE
846extern struct ftrace_hash __rcu *ftrace_graph_hash;
847extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
848
/*
 * Return 1 if @trace->func should start a function-graph trace: either
 * no graph filter hash is set (trace everything), or the function is
 * in the hash.  On a filter hit, record the graph-recursion bit and
 * the current depth so nested calls are traced as well.
 */
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {

		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		trace_recursion_set(TRACE_GRAPH_BIT);
		trace_recursion_set_depth(trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_irq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}
895
896static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
897{
898 if (trace_recursion_test(TRACE_GRAPH_BIT) &&
899 trace->depth == trace_recursion_depth())
900 trace_recursion_clear(TRACE_GRAPH_BIT);
901}
902
/*
 * Return non-zero if @addr is in the graph notrace hash, i.e. graph
 * tracing of this function (and its children) should be suppressed.
 */
static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;
	struct ftrace_hash *notrace_hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
						 !preemptible());

	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
925#else
/* !CONFIG_DYNAMIC_FTRACE: no filter hashes exist, so every function is
 * graph-traced and nothing is in the notrace set. */
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{ }
937#endif
938
939extern unsigned int fgraph_max_depth;
940
/*
 * Decide whether this function entry should be skipped by the graph
 * tracer.  Trace it when it is nested inside an already-traced call
 * (TRACE_GRAPH_BIT) or matches the filter (ftrace_graph_addr()), and
 * only while the depth is valid and below fgraph_max_depth (0 = no
 * limit).
 */
static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* trace it when it is-nested-in or is a function enabled. */
	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
		 ftrace_graph_addr(trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}
949
950#else
/* !CONFIG_FUNCTION_GRAPH_TRACER stub: let another printer handle it. */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
956#endif
957
958extern struct list_head ftrace_pids;
959
960#ifdef CONFIG_FUNCTION_TRACER
961
962#define FTRACE_PID_IGNORE -1
963#define FTRACE_PID_TRACE -2
964
/*
 * A named command usable in the function filter files; @func is
 * invoked with the parsed function/command/parameter strings.
 * Registered via register_ftrace_command().
 */
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
973extern bool ftrace_filter_param __initdata;
/*
 * Return true if the task currently running on this CPU should be
 * traced by the function tracer: its per-cpu pid-filter state is not
 * FTRACE_PID_IGNORE.
 */
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
		FTRACE_PID_IGNORE;
}
979extern int ftrace_is_dead(void);
980int ftrace_create_function_files(struct trace_array *tr,
981 struct dentry *parent);
982void ftrace_destroy_function_files(struct trace_array *tr);
983int ftrace_allocate_ftrace_ops(struct trace_array *tr);
984void ftrace_free_ftrace_ops(struct trace_array *tr);
985void ftrace_init_global_array_ops(struct trace_array *tr);
986void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
987void ftrace_reset_array_ops(struct trace_array *tr);
988void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
989void ftrace_init_tracefs_toplevel(struct trace_array *tr,
990 struct dentry *d_tracer);
991void ftrace_clear_pids(struct trace_array *tr);
992int init_function_trace(void);
993void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
994#else
/* !CONFIG_FUNCTION_TRACER stubs: function tracing is compiled out, so
 * every task "is traced", setup succeeds trivially and teardown is a
 * no-op. */
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	return 0;
}
static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }

/* Statement-style no-op so call sites compile unchanged. */
#define ftrace_init_array_ops(tr, func) do { } while (0)
1022#endif
1023
1024#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1025
/*
 * Callbacks for a function probe attached through the function filter
 * files.
 * @func:  called when a probed function address is hit
 * @init:  called when the probe is attached to an ip; may allocate
 *         per-ip state through @data
 * @free:  called when the probe is removed; releases @data
 * @print: show this probe in the filter listing via seq_file
 */
struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};
1044
1045struct ftrace_func_mapper;
1046typedef int (*ftrace_mapper_func)(void *data);
1047
1048struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1049void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1050 unsigned long ip);
1051int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1052 unsigned long ip, void *data);
1053void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1054 unsigned long ip);
1055void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1056 ftrace_mapper_func free_func);
1057
1058extern int
1059register_ftrace_function_probe(char *glob, struct trace_array *tr,
1060 struct ftrace_probe_ops *ops, void *data);
1061extern int
1062unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1063 struct ftrace_probe_ops *ops);
1064extern void clear_ftrace_function_probes(struct trace_array *tr);
1065
1066int register_ftrace_command(struct ftrace_func_command *cmd);
1067int unregister_ftrace_command(struct ftrace_func_command *cmd);
1068
1069void ftrace_create_filter_files(struct ftrace_ops *ops,
1070 struct dentry *parent);
1071void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1072
1073extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
1074 int len, int reset);
1075extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
1076 int len, int reset);
1077#else
1078struct ftrace_func_command;
1079
/*
 * Stubs for when dynamic function tracing (and thus the function probe
 * and command mechanism) is not configured.
 */
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}
1091
1092
1093
1094
1095
1096#define ftrace_create_filter_files(ops, parent) do { } while (0)
1097#define ftrace_destroy_filter_files(ops) do { } while (0)
1098#endif
1099
1100bool ftrace_event_is_function(struct trace_event_call *call);
1101
1102
1103
1104
1105
1106
1107
1108
/*
 * trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};
1115
1116static inline bool trace_parser_loaded(struct trace_parser *parser)
1117{
1118 return (parser->idx != 0);
1119}
1120
1121static inline bool trace_parser_cont(struct trace_parser *parser)
1122{
1123 return parser->cont;
1124}
1125
1126static inline void trace_parser_clear(struct trace_parser *parser)
1127{
1128 parser->cont = false;
1129 parser->idx = 0;
1130}
1131
1132extern int trace_parser_get_init(struct trace_parser *parser, int size);
1133extern void trace_parser_put(struct trace_parser *parser);
1134extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1135 size_t cnt, loff_t *ppos);
1136
1137
1138
1139
1140#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1141# define FGRAPH_FLAGS \
1142 C(DISPLAY_GRAPH, "display-graph"),
1143#else
1144# define FGRAPH_FLAGS
1145#endif
1146
1147#ifdef CONFIG_BRANCH_TRACER
1148# define BRANCH_FLAGS \
1149 C(BRANCH, "branch"),
1150#else
1151# define BRANCH_FLAGS
1152#endif
1153
1154#ifdef CONFIG_FUNCTION_TRACER
1155# define FUNCTION_FLAGS \
1156 C(FUNCTION, "function-trace"), \
1157 C(FUNC_FORK, "function-fork"),
1158# define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION
1159#else
1160# define FUNCTION_FLAGS
1161# define FUNCTION_DEFAULT_FLAGS 0UL
1162# define TRACE_ITER_FUNC_FORK 0UL
1163#endif
1164
1165#ifdef CONFIG_STACKTRACE
1166# define STACK_FLAGS \
1167 C(STACKTRACE, "stacktrace"),
1168#else
1169# define STACK_FLAGS
1170#endif
1171
1172
1173
1174
1175
1176
1177
1178
/*
 * Master list of the trace_options flags.  Each C(FLAG, "name") pair
 * is expanded twice below: once into a bit position
 * (enum trace_iterator_bits) and once into a mask
 * (enum trace_iterator_flags); "name" is what the tracefs
 * trace_options file shows.  NOTE(review): the order here presumably
 * must match the trace_options string array in trace.c -- confirm.
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),	\
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
		C(HASH_PTR,		"hash-ptr"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS
1208
1209
1210
1211
1212
/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b)		TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};
1221
1222
1223
1224
1225
1226#undef C
1227#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1228
1229enum trace_iterator_flags { TRACE_FLAGS };
1230
1231
1232
1233
1234
1235#define TRACE_ITER_SYM_MASK \
1236 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1237
1238extern struct tracer nop_trace;
1239
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
/* Start branch tracing only if the instance has the "branch" option set. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
/* Branch tracer compiled out: enable/disable are no-ops. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif
1263
1264
1265int tracing_update_buffers(void);
1266
/*
 * One field of a trace event's binary record.  Linked into the event's
 * field list; @offset/@size locate the field inside the record and
 * @filter_type selects how filter comparisons treat it.
 */
struct ftrace_event_field {
	struct list_head	link;
	const char		*name;		/* field name as written in filters */
	const char		*type;		/* C type string */
	int			filter_type;	/* FILTER_* class (see filter_assign_type()) */
	int			offset;		/* byte offset within the record */
	int			size;		/* byte size of the field */
	int			is_signed;	/* nonzero if the field is signed */
};
1276
struct prog_entry;

/* A compiled event filter: predicate program plus its source string. */
struct event_filter {
	struct prog_entry	__rcu *prog;	/* RCU-protected predicate program */
	char			*filter_string;	/* filter text as set by the user */
};

/* A group of related events sharing one subsystem-level filter. */
struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

/*
 * Per-instance view of a subsystem: binds @subsystem to the events
 * directory (@entry) of trace array @tr.
 */
struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;	/* events currently under this dir */
};
1299
1300extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1301 struct trace_buffer *buffer,
1302 struct ring_buffer_event *event);
1303
/*
 * Commit a reserved @event to @buffer for instance @tr.  @trace_ctx packs
 * the irq/preempt context, and @regs (may be NULL) seeds any stack trace
 * recording — NOTE(review): behavior presumed from the definition in
 * trace.c; confirm there.
 *
 * Fix: parameter was misspelled "trcace_ctx" in this declaration (name in
 * a prototype only — no behavioral or ABI change).
 */
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned int trace_ctx,
				     struct pt_regs *regs);
1309
/* Commit helper for callers with no saved registers: forwards NULL regs. */
static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned int trace_ctx)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
}
1317
/*
 * Per-CPU pre-allocated "buffered" event and its nesting counter.
 * NOTE(review): presumably used to stage events before filtering so a
 * discarded event never touches the ring buffer — confirm in trace.c.
 */
DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

/*
 * Discard an event that should not be committed.  If @event is this CPU's
 * buffered event, nothing was reserved in the ring buffer — just drop the
 * count; otherwise give the reserved slot back to the ring buffer.
 */
static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
/*
 * Helper for event_trigger_unlock_commit(): run conditional triggers and
 * decide whether the just-reserved @event must be discarded.
 *
 * Triggers that need the record's data (EVENT_FILE_FL_TRIGGER_COND) are
 * called here; the types of triggers that fired are reported through @tt
 * so the caller can make the post-commit trigger calls.
 *
 * The event is discarded when the file is soft-disabled (recording is
 * suppressed even though the event still fires — NOTE(review): presumed,
 * e.g. when only triggers enabled it; confirm) or when an attached filter
 * rejects the record.
 *
 * Returns true if @event was discarded, false if it should be committed.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, buffer, entry, event);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
/*
 * event_trigger_unlock_commit - finish the event commit, honoring triggers
 *
 * Commits @event to the buffer unless a trigger or filter decided it must
 * be discarded, then invokes any post-commit triggers that fired during
 * the discard test.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned int trace_ctx)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);

	if (tt)
		event_triggers_post_call(file, tt);
}
1396
#define FILTER_PRED_INVALID	((unsigned short)-1)	/* "no predicate" marker */
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * Predicate indices are stored in unsigned shorts with the top bits
 * reserved for the flags above.  NOTE(review): IS_RIGHT and FOLD
 * deliberately share bit 15 (a pred should never need both at once —
 * confirm), leaving 2^14 usable predicates.
 */
#define MAX_FILTER_PRED		16384
1409
struct filter_pred;
struct regex;

/* Evaluate one predicate against a binary event record; nonzero == match. */
typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

/* Compare @str (length @len) against the compiled pattern in @r. */
typedef int (*regex_match_func)(char *str, struct regex *r, int len);

/*
 * Pattern classes derived by filter_parse_regex() from the placement of
 * '*' wildcards in a filter string.
 */
enum regex_type {
	MATCH_FULL = 0,		/* no wildcards: exact comparison */
	MATCH_FRONT_ONLY,	/* "pat*" */
	MATCH_MIDDLE_ONLY,	/* "*pat*" */
	MATCH_END_ONLY,		/* "*pat" */
	MATCH_GLOB,		/* full glob matching */
	MATCH_INDEX,		/* NOTE(review): match by numeric index — confirm */
};

/* A string pattern compiled for one predicate. */
struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;		/* length of @pattern */
	int			field_len;	/* length of the field compared against */
	regex_match_func	match;		/* comparison routine for this class */
};

/* One predicate of an event filter. */
struct filter_pred {
	filter_pred_fn_t 	fn;	/* evaluation callback */
	u64 			val;	/* numeric value compared against */
	struct regex		regex;	/* string pattern, for string preds */
	unsigned short		*ops;	/* NOTE(review): purpose unclear from this header */
	struct ftrace_event_field *field;	/* event field being tested */
	int 			offset;	/* field offset within the record */
	int 			not;	/* invert the result ("!=", "!~") */
	int 			op;	/* comparison operator token */
};
1443
1444static inline bool is_string_field(struct ftrace_event_field *field)
1445{
1446 return field->filter_type == FILTER_DYN_STRING ||
1447 field->filter_type == FILTER_STATIC_STRING ||
1448 field->filter_type == FILTER_PTR_STRING ||
1449 field->filter_type == FILTER_COMM;
1450}
1451
1452static inline bool is_function_field(struct ftrace_event_field *field)
1453{
1454 return field->filter_type == FILTER_TRACE_FN;
1455}
1456
1457extern enum regex_type
1458filter_parse_regex(char *buff, int len, char **search, int *not);
1459extern void print_event_filter(struct trace_event_file *file,
1460 struct trace_seq *s);
1461extern int apply_event_filter(struct trace_event_file *file,
1462 char *filter_string);
1463extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1464 char *filter_string);
1465extern void print_subsystem_event_filter(struct event_subsystem *system,
1466 struct trace_seq *s);
1467extern int filter_assign_type(const char *type);
1468extern int create_event_filter(struct trace_array *tr,
1469 struct trace_event_call *call,
1470 char *filter_str, bool set_str,
1471 struct event_filter **filterp);
1472extern void free_event_filter(struct event_filter *filter);
1473
1474struct ftrace_event_field *
1475trace_find_event_field(struct trace_event_call *call, char *name);
1476
1477extern void trace_event_enable_cmd_record(bool enable);
1478extern void trace_event_enable_tgid_record(bool enable);
1479
1480extern int event_trace_init(void);
1481extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1482extern int event_trace_del_tracer(struct trace_array *tr);
1483extern void __trace_early_add_events(struct trace_array *tr);
1484
1485extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1486 const char *system,
1487 const char *event);
1488extern struct trace_event_file *find_event_file(struct trace_array *tr,
1489 const char *system,
1490 const char *event);
1491
1492static inline void *event_file_data(struct file *filp)
1493{
1494 return READ_ONCE(file_inode(filp)->i_private);
1495}
1496
1497extern struct mutex event_mutex;
1498extern struct list_head ftrace_events;
1499
1500extern const struct file_operations event_trigger_fops;
1501extern const struct file_operations event_hist_fops;
1502extern const struct file_operations event_hist_debug_fops;
1503extern const struct file_operations event_inject_fops;
1504
/*
 * Histogram trigger commands are only registered when CONFIG_HIST_TRIGGERS
 * is enabled; otherwise the registration hooks are no-op stubs.
 */
#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif
1512
1513extern int register_trigger_cmds(void);
1514extern void clear_event_triggers(struct trace_array *tr);
1515
enum {
	EVENT_TRIGGER_FL_PROBE		= BIT(0),
};

/* One trigger instance attached to an event file. */
struct event_trigger_data {
	unsigned long			count;	/* NOTE(review): remaining fire count, 0 == unlimited — confirm */
	int				ref;	/* reference count */
	int				flags;	/* EVENT_TRIGGER_FL_* */
	struct event_trigger_ops	*ops;	/* trigger callbacks */
	struct event_command		*cmd_ops;	/* command that created this trigger */
	struct event_filter __rcu	*filter;	/* optional compiled filter */
	char				*filter_str;	/* filter source text */
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;		/* non-NULL for named triggers */
	struct list_head		named_list;	/* peers sharing this name */
	struct event_trigger_data	*named_data;	/* data shared among named peers */
};

/* Command-name strings for the enable/disable (hist) trigger family. */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

/* Private data for enable_event/disable_event (and _hist) triggers. */
struct enable_trigger_data {
	struct trace_event_file		*file;	/* event file to enable/disable */
	bool				enable;	/* true = enable, false = disable */
	bool				hist;	/* operate on the hist trigger instead */
};
1548
1549extern int event_enable_trigger_print(struct seq_file *m,
1550 struct event_trigger_ops *ops,
1551 struct event_trigger_data *data);
1552extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1553 struct event_trigger_data *data);
1554extern int event_enable_trigger_func(struct event_command *cmd_ops,
1555 struct trace_event_file *file,
1556 char *glob, char *cmd, char *param);
1557extern int event_enable_register_trigger(char *glob,
1558 struct event_trigger_ops *ops,
1559 struct event_trigger_data *data,
1560 struct trace_event_file *file);
1561extern void event_enable_unregister_trigger(char *glob,
1562 struct event_trigger_ops *ops,
1563 struct event_trigger_data *test,
1564 struct trace_event_file *file);
1565extern void trigger_data_free(struct event_trigger_data *data);
1566extern int event_trigger_init(struct event_trigger_ops *ops,
1567 struct event_trigger_data *data);
1568extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1569 int trigger_enable);
1570extern void update_cond_flag(struct trace_event_file *file);
1571extern int set_trigger_filter(char *filter_str,
1572 struct event_trigger_data *trigger_data,
1573 struct trace_event_file *file);
1574extern struct event_trigger_data *find_named_trigger(const char *name);
1575extern bool is_named_trigger(struct event_trigger_data *test);
1576extern int save_named_trigger(const char *name,
1577 struct event_trigger_data *data);
1578extern void del_named_trigger(struct event_trigger_data *data);
1579extern void pause_named_trigger(struct event_trigger_data *data);
1580extern void unpause_named_trigger(struct event_trigger_data *data);
1581extern void set_named_trigger_data(struct event_trigger_data *data,
1582 struct event_trigger_data *named_data);
1583extern struct event_trigger_data *
1584get_named_trigger_data(struct event_trigger_data *data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
/*
 * NOTE(review): removed a redundant re-declaration of
 * register_trigger_hist_enable_disable_cmds() — it is already declared
 * (or stubbed as static inline) in the CONFIG_HIST_TRIGGERS block above,
 * and the duplicate extern was misleading in the !CONFIG_HIST_TRIGGERS
 * case.
 */
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
/*
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * @func:  the trigger "probe", invoked when the triggering event fires;
 *         receives the trigger data plus the ring-buffer record/event
 * @init:  set-up when the trigger is registered (NOTE(review): optional —
 *         confirm callers NULL-check)
 * @free:  tear-down / reference drop when the trigger is removed
 * @print: emit the trigger's description to a seq_file (the event's
 *         "trigger" file in tracefs)
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					struct trace_buffer *buffer, void *rec,
					struct ring_buffer_event *rbe);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
/*
 * struct event_command - a trigger command (e.g. "traceon", "snapshot")
 *
 * @list:            link in the global list of registered commands
 * @name:            command name as written into an event's "trigger" file
 * @trigger_type:    the event_trigger_type this command produces
 * @flags:           enum event_command_flags (see below)
 * @func:            parse and apply the command string for an event file
 * @reg:             attach a trigger instance to an event file
 * @unreg:           detach a trigger instance from an event file
 * @unreg_all:       remove all of this command's triggers from a file
 *                   (NOTE(review): optional — confirm callers NULL-check)
 * @set_filter:      attach a filter string to a trigger (optional)
 * @get_trigger_ops: pick the event_trigger_ops for the given cmd/param
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
/*
 * Behavior flags for an event_command.  POST_TRIGGER: triggers run after
 * the record is committed; NEEDS_REC: triggers need the record's data.
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};
1769
1770static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1771{
1772 return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1773}
1774
1775static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1776{
1777 return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1778}
1779
1780extern int trace_event_enable_disable(struct trace_event_file *file,
1781 int enable, int soft_disable);
1782extern int tracing_alloc_snapshot(void);
1783extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1784extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1785
1786extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1787extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1788
1789extern const char *__start___trace_bprintk_fmt[];
1790extern const char *__stop___trace_bprintk_fmt[];
1791
1792extern const char *__start___tracepoint_str[];
1793extern const char *__stop___tracepoint_str[];
1794
1795void trace_printk_control(bool enabled);
1796void trace_printk_start_comm(void);
1797int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1798int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1799
1800
1801extern int trace_set_options(struct trace_array *tr, char *option);
1802extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
1803extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
1804 unsigned long size, int cpu_id);
1805extern int tracing_set_cpumask(struct trace_array *tr,
1806 cpumask_var_t tracing_cpumask_new);
1807
1808
1809#define MAX_EVENT_NAME_LEN 64
1810
1811extern ssize_t trace_parse_run_command(struct file *file,
1812 const char __user *buffer, size_t count, loff_t *ppos,
1813 int (*createfn)(const char *));
1814
1815extern unsigned int err_pos(char *cmd, const char *str);
1816extern void tracing_log_err(struct trace_array *tr,
1817 const char *loc, const char *cmd,
1818 const char **errs, u8 type, u8 pos);
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
/* trace_puts() for use inside the tracing code itself. */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

/*
 * Re-expand FTRACE_ENTRY (first defined at the top of this header to emit
 * the per-event entry structs) so that including trace_entries.h a second
 * time now declares the trace_event_call for each static event instead.
 */
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))

#include "trace_entries.h"
1843
1844#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1845int perf_ftrace_event_register(struct trace_event_call *call,
1846 enum trace_reg type, void *data);
1847#else
1848#define perf_ftrace_event_register NULL
1849#endif
1850
#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
/* No syscall metadata without CONFIG_FTRACE_SYSCALLS; always NULL. */
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif
1861
#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);

extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
extern int trigger_process_regex(struct trace_event_file *file, char *buff);
#else
/* Event tracing compiled out: init and eval-map updates are no-ops. */
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif
1872
#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
#else
/* Snapshot support compiled out: stubs that "succeed" without doing anything. */
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	return 0;
}
#endif

/* Hooks called on preempt enable/disable when the preempt tracer is built. */
#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1);
void tracer_preempt_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif
/* Hooks called on hardirq enable/disable when the irqsoff tracer is built. */
#ifdef CONFIG_IRQSOFF_TRACER
void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif
1898
1899extern struct trace_iterator *tracepoint_print_iter;
1900
1901
1902
1903
1904
1905
/*
 * Reset the state of the trace_iterator so that it can read consumed data.
 * Everything after the seq member is zeroed; the fields before it
 * (file handles, buffer pointers, etc.) must survive the reset.
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	const size_t offset = offsetof(struct trace_iterator, seq);

	/*
	 * One memset() of the tail of the struct, rather than per-member
	 * assignments, so new members are reset without needing an edit here.
	 */
	memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);

	iter->pos = -1;
}
1918
1919
/*
 * Check that @name is a valid C-like identifier: a letter or underscore
 * first, then letters, digits, or underscores.  Rejects the empty string.
 */
static inline bool is_good_name(const char *name)
{
	if (*name != '_' && !isalpha(*name))
		return false;
	for (name++; *name != '\0'; name++) {
		if (*name != '_' && !isalpha(*name) && !isdigit(*name))
			return false;
	}
	return true;
}
1930
1931
/*
 * Replace characters that are not legal in an event name (':' and '.',
 * e.g. from symbol names) with '_', in place.
 *
 * Fix: the previous loop (while (*name++ != '\0')) advanced the pointer
 * before testing, so a ':' or '.' in the FIRST position was never
 * sanitized.  Scan every character, including the first.
 */
static inline void sanitize_event_name(char *name)
{
	for (; *name != '\0'; name++)
		if (*name == ':' || *name == '.')
			*name = '_';
}
1938
1939
1940
1941
1942
1943
1944
1945
/*
 * struct trace_min_max_param - control block for a bounded u64 tracefs file
 * @lock: if non-NULL, held while the value is updated
 * @min:  if non-NULL, lowest value accepted on write
 * @max:  if non-NULL, highest value accepted on write
 * @val:  the u64 the file reads and writes
 *
 * NOTE(review): bounds/lock semantics presumed from the field names; the
 * authoritative behavior is in trace_min_max_fops' handlers (trace.c).
 */
struct trace_min_max_param {
	struct mutex	*lock;
	u64		*val;
	u64		*min;
	u64		*max;
};

/* Buffer size ample for a u64 in decimal plus sign and NUL. */
#define U64_STR_SIZE		24

extern const struct file_operations trace_min_max_fops;
1956
1957#endif
1958