1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/ftrace_event.h>
20
21
22
23
24
25
26
27
28
29
/*
 * Stage 1 of trace-event processing.
 *
 * TRACE_EVENT() is rewritten as a class definition plus exactly one
 * event instance of that class: every later pass that overrides
 * DECLARE_EVENT_CLASS()/DEFINE_EVENT() therefore picks up plain
 * TRACE_EVENT() users as well.  PARAMS() keeps each comma-containing
 * argument together as a single macro parameter.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name, \
			    PARAMS(proto), \
			    PARAMS(args), \
			    PARAMS(tstruct), \
			    PARAMS(assign), \
			    PARAMS(print)); \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
39
40
/*
 * Stage-1 field macros: expand each TP_STRUCT__entry() entry into a
 * plain struct member declaration.
 *
 * A __dynamic_array() (and therefore a __string(), which is a
 * char-typed dynamic array of runtime length) is stored as a single
 * u32 "data location" word rather than inline data; later stages pack
 * offset and length into it.
 */
#undef __field
#define __field(type, item) type item;

#undef __field_ext
#define __field_ext(type, item, filter_type) type item;

#undef __array
#define __array(type, item, len) type item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* TP_STRUCT__entry() is transparent: it just emits its arguments. */
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
58
/*
 * Stage-1 class/event expansion: build the raw record layout,
 * struct ftrace_raw_<name>, that is written into the ring buffer.
 * The common trace_entry header comes first, then the event's own
 * fields (tstruct), then a zero-length __data[] marker where dynamic
 * array payloads begin.  An ftrace_event_class is forward-declared
 * here and defined in a later pass.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
	struct ftrace_raw_##name { \
		struct trace_entry ent; \
		tstruct \
		char __data[0]; \
	}; \
	\
	static struct ftrace_event_class event_class_##name;

/*
 * Forward-declare the per-event ftrace_event_call so that
 * TRACE_EVENT_FLAGS() can reference it before the real definition
 * (emitted by a later pass).  Note: no trailing semicolon here —
 * users/later expansions supply it.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
	static struct ftrace_event_call __used \
	__attribute__((__aligned__(4))) event_##name

/* An event with its own print format still only needs the declaration. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
77
78
/*
 * TRACE_EVENT_FN() carries registration callbacks (reg/unreg); this
 * pass simply drops them and expands like a plain TRACE_EVENT().
 * NOTE(review): the expansion ends with a line-continuation onto the
 * blank line below — harmless but looks like a leftover; confirm
 * against upstream before "fixing".
 */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
		assign, print, reg, unreg) \
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
		PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value) \
	__TRACE_EVENT_FLAGS(name, value)

/* First pass over the trace header: emit the raw record structs. */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
/*
 * Stage 2: build struct ftrace_data_offsets_<call>, which holds one
 * u32 per dynamic array (everything else expands to nothing).  It is
 * filled in at trace time by ftrace_get_offsets_<call>() and consumed
 * when the __data_loc_* words of the raw record are assigned.
 */
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/* Only dynamic arrays occupy a slot in the offsets struct. */
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
	struct ftrace_data_offsets_##call { \
		tstruct; \
	};

/* One offsets struct per class; per-event instances add nothing here. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Flags were handled in stage 1; ignore them from here on. */
#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

/* Second pass: emit the data-offsets structs. */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
/*
 * Stage 3: helpers used inside the generated output functions, where
 * the raw record is available through a local named "field" — hence
 * __entry aliases to it.  TP_printk() turns into a trailing-newline
 * format string plus its arguments for trace_seq_printf().
 */
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/*
 * A __data_loc word packs the payload offset (from the start of the
 * record) in its low 16 bits; mask it out to locate the data.
 */
#undef __get_dynamic_array
#define __get_dynamic_array(field) \
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

/*
 * __print_flags()/__print_symbolic() build a static, {-1, NULL}
 * terminated lookup table once and hand it to the ftrace seq
 * formatters; "p" is the temporary trace_seq in scope at the call
 * site.
 */
#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
	({ \
		static const struct trace_print_flags __flags[] = \
			{ flag_array, { -1, NULL }}; \
		ftrace_print_flags_seq(p, delim, flag, __flags); \
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
	({ \
		static const struct trace_print_flags symbols[] = \
			{ symbol_array, { -1, NULL }}; \
		ftrace_print_symbols_seq(p, value, symbols); \
	})

/*
 * On 32-bit kernels a u64 value needs the dedicated _u64 table and
 * formatter; on 64-bit it is just __print_symbolic().
 */
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...) \
	({ \
		static const struct trace_print_flags_u64 symbols[] = \
			{ symbol_array, { -1, NULL } }; \
		ftrace_print_symbols_seq_u64(p, value, symbols); \
	})
#else
#define __print_symbolic_u64(value, symbol_array...) \
			__print_symbolic(value, symbol_array)
#endif /* BITS_PER_LONG == 32 */

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
223
/*
 * Stage 3, per class: generate ftrace_raw_output_<call>(), which
 * formats one ring-buffer record into iter->seq using the event's
 * TP_printk() (already rewritten above), and the trace_event_functions
 * table that exposes it.  Common header output is delegated to
 * ftrace_raw_output_prep(); a zero return from trace_seq_printf()
 * means the seq buffer is full, reported as a partial line.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
			 struct trace_event *trace_event) \
{ \
	struct trace_seq *s = &iter->seq; \
	struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
	struct ftrace_raw_##call *field; \
	int ret; \
	\
	field = (typeof(field))iter->ent; \
	\
	ret = ftrace_raw_output_prep(iter, trace_event); \
	if (ret) \
		return ret; \
	\
	ret = trace_seq_printf(s, print); \
	if (!ret) \
		return TRACE_TYPE_PARTIAL_LINE; \
	\
	return TRACE_TYPE_HANDLED; \
} \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \
	.trace = ftrace_raw_output_##call, \
};
250
/*
 * Stage 3, per event with its own print format: generate a dedicated
 * output function.  Unlike the class variant above, this one checks
 * that the record's type id matches this event (WARN + UNHANDLED on
 * mismatch), prints an explicit "<name>: " prefix, and reuses the raw
 * record layout of its template class.  Here trace_seq_printf()'s
 * nonzero return means success, so the calls are chained on ret.
 */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
			 struct trace_event *event) \
{ \
	struct trace_seq *s = &iter->seq; \
	struct ftrace_raw_##template *field; \
	struct trace_entry *entry; \
	struct trace_seq *p = &iter->tmp_seq; \
	int ret; \
	\
	entry = iter->ent; \
	\
	if (entry->type != event_##call.event.type) { \
		WARN_ON_ONCE(1); \
		return TRACE_TYPE_UNHANDLED; \
	} \
	\
	field = (typeof(field))entry; \
	\
	trace_seq_init(p); \
	ret = trace_seq_printf(s, "%s: ", #call); \
	if (ret) \
		ret = trace_seq_printf(s, print); \
	if (!ret) \
		return TRACE_TYPE_PARTIAL_LINE; \
	\
	return TRACE_TYPE_HANDLED; \
} \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \
	.trace = ftrace_raw_output_##call, \
};

/* Third pass: emit the output functions. */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
286
/*
 * Stage 4: generate ftrace_define_fields_<call>(), which registers
 * every field (name, type string, offset, size, signedness, filter
 * type) with the event-filter core via trace_define_field().  The
 * local "field" variable exists only so offsetof()/sizeof() can be
 * applied to the record layout.
 */
#undef __field_ext
#define __field_ext(type, item, filter_type) \
	ret = trace_define_field(event_call, #type, #item, \
				 offsetof(typeof(field), item), \
				 sizeof(field.item), \
				 is_signed_type(type), filter_type); \
	if (ret) \
		return ret;

#undef __field
#define __field(type, item) __field_ext(type, item, FILTER_OTHER)

/*
 * Arrays format their type as "type[len]" into the shared, mutex-
 * protected event_storage buffer before registering.  The trailing
 * semicolon after while (0) is deliberate here: the macro is invoked
 * without one.
 */
#undef __array
#define __array(type, item, len) \
	do { \
		mutex_lock(&event_storage_mutex); \
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
		snprintf(event_storage, sizeof(event_storage), \
			 "%s[%d]", #type, len); \
		ret = trace_define_field(event_call, event_storage, #item, \
				 offsetof(typeof(field), item), \
				 sizeof(field.item), \
				 is_signed_type(type), FILTER_OTHER); \
		mutex_unlock(&event_storage_mutex); \
		if (ret) \
			return ret; \
	} while (0);

/*
 * Dynamic arrays register their __data_loc word under the
 * "__data_loc type[]" pseudo-type.  NOTE(review): unlike the field
 * macros above, this one does not check/return ret on failure —
 * matches the historical code, but worth confirming upstream.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item), \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/*
 * NOTE(review): if tstruct expands to no field macros, "ret" is
 * returned uninitialized; in practice every event defines at least
 * one field.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int notrace __init \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \
	struct ftrace_raw_##call field; \
	int ret; \
	\
	tstruct; \
	\
	return ret; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Fourth pass: emit the field-definition functions. */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
346
347
348
349
350
/*
 * Stage 5: generate ftrace_get_offsets_<call>(), which computes, at
 * trace time, where each dynamic array's payload will live and how
 * big the dynamic portion is.  Fixed-size fields contribute nothing.
 */
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/*
 * For each dynamic array: record the payload offset (relative to the
 * record start, i.e. running size plus offsetof __data), pack the
 * byte length into the high 16 bits, and grow the running size.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	__data_offsets->item = __data_size + \
			       offsetof(typeof(*entry), __data); \
	__data_offsets->item |= (len * sizeof(type)) << 16; \
	__data_size += (len) * sizeof(type);

/* Strings size themselves from the source at trace time (plus NUL). */
#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline notrace int ftrace_get_offsets_##call( \
	struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
	int __data_size = 0; \
	struct ftrace_raw_##call __maybe_unused *entry; \
	\
	tstruct; \
	\
	return __data_size; \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Fifth pass: emit the offset calculators. */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
/*
 * When perf is built in, each class additionally gets a
 * perf_trace_<call>() prototype and a .perf_probe initializer;
 * otherwise both helpers expand to nothing.
 */
#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto) \
	static notrace void \
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call) \
	.perf_probe = perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

/*
 * Assign-stage helpers: inside the generated probe the record local
 * is named "entry".  Only dynamic arrays need per-record setup here —
 * copying the precomputed __data_loc word out of __data_offsets.
 * NOTE(review): the __string() definition ends with a stray
 * line-continuation onto the blank line below; harmless, but matches
 * a known upstream quirk — do not add a comment on that line.
 */
#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1) \

#undef __assign_str
#define __assign_str(dst, src) \
	strcpy(__get_str(dst), src);

/* TP_fast_assign() runs as-is; TP_perf_assign() is dropped here. */
#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)
508
/*
 * Generate ftrace_raw_event_<call>(), the tracepoint probe that
 * records one event into the ftrace ring buffer: bail out if the
 * event file is soft-disabled, size the dynamic payload via
 * ftrace_get_offsets_<call>(), reserve a buffer slot, run the
 * field-assignment macros (tstruct sets __data_loc words, then the
 * TP_fast_assign block), and commit unless the event filter discards
 * the record.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
	\
static notrace void \
ftrace_raw_event_##call(void *__data, proto) \
{ \
	struct ftrace_event_file *ftrace_file = __data; \
	struct ftrace_event_call *event_call = ftrace_file->event_call; \
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event; \
	struct ftrace_raw_##call *entry; \
	struct ring_buffer *buffer; \
	unsigned long irq_flags; \
	int __data_size; \
	int pc; \
	\
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
		     &ftrace_file->flags)) \
		return; \
	\
	local_save_flags(irq_flags); \
	pc = preempt_count(); \
	\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	\
	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, \
				 event_call->event.type, \
				 sizeof(*entry) + __data_size, \
				 irq_flags, pc); \
	if (!event) \
		return; \
	entry = ring_buffer_event_data(event); \
	\
	tstruct \
	\
	{ assign; } \
	\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
}
549
550
551
552
553
554
/*
 * Compile-time sanity check: ftrace_test_probe_<call>() is never
 * called; it exists only so the compiler verifies that the template's
 * probe matches the tracepoint's callback signature via
 * check_trace_callback_type_<call>().
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void ftrace_test_probe_##call(void) \
{ \
	check_trace_callback_type_##call(ftrace_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

/* Pass: emit the probes and their type checks. */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
566
/*
 * Stage 6 setup: for the exported print_fmt string, field references
 * become the literal "REC" placeholder and the print helpers are
 * undefined so their names survive verbatim via __stringify().
 * TP_printk() now stringifies to: "fmt", args.
 */
#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)

/*
 * Define the real ftrace_event_class for each class, wiring together
 * everything generated by the earlier passes (define_fields, probe,
 * registration helper) plus the perf probe when enabled.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
_TRACE_PERF_PROTO(call, PARAMS(proto)); \
static const char print_fmt_##call[] = print; \
static struct ftrace_event_class __used __refdata event_class_##call = { \
	.system = __stringify(TRACE_SYSTEM), \
	.define_fields = ftrace_define_fields_##call, \
	.fields = LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init = trace_event_raw_init, \
	.probe = ftrace_raw_event_##call, \
	.reg = ftrace_event_reg, \
	_TRACE_PERF_INIT(call) \
};
592
/*
 * Stage 6, per event: define the ftrace_event_call and drop a pointer
 * to it into the "_ftrace_events" linker section, which the trace
 * core walks at boot to register all events.  A plain DEFINE_EVENT
 * borrows its class's output funcs and print format; a
 * DEFINE_EVENT_PRINT gets its own print_fmt and the dedicated output
 * function generated for it in stage 3.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
	\
static struct ftrace_event_call __used event_##call = { \
	.name = #call, \
	.class = &event_class_##template, \
	.event.funcs = &ftrace_event_type_funcs_##template, \
	.print_fmt = print_fmt_##template, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
	\
static const char print_fmt_##call[] = print; \
	\
static struct ftrace_event_call __used event_##call = { \
	.name = #call, \
	.class = &event_class_##template, \
	.event.funcs = &ftrace_event_type_funcs_##call, \
	.print_fmt = print_fmt_##call, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

/* Sixth pass: emit the event classes and event calls. */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
620
621
/*
 * Perf stage: generate perf_trace_<call>(), the perf-flavored probe.
 * Unlike the ftrace probe, it captures caller registers, stages the
 * record in a per-cpu perf buffer (u64-aligned entry size, computed
 * by adding then subtracting a u32 so the header accounting matches
 * perf's layout), honors the __perf_addr/__perf_count/__perf_task
 * overrides from TP_perf_assign(), and submits to the perf hlist.
 */
#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

/* These redirect into the probe's locals declared below. */
#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef __perf_task
#define __perf_task(t) __task = (t)

/* In this pass the perf-only assignments DO run. */
#undef TP_perf_assign
#define TP_perf_assign(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
perf_trace_##call(void *__data, proto) \
{ \
	struct ftrace_event_call *event_call = __data; \
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry; \
	struct pt_regs __regs; \
	u64 __addr = 0, __count = 1; \
	struct task_struct *__task = NULL; \
	struct hlist_head *head; \
	int __entry_size; \
	int __data_size; \
	int rctx; \
	\
	perf_fetch_caller_regs(&__regs); \
	\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64)); \
	__entry_size -= sizeof(u32); \
	\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
		      "profile buffer not large enough")) \
		return; \
	\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
		__entry_size, event_call->event.type, &__regs, &rctx); \
	if (!entry) \
		return; \
	\
	tstruct \
	\
	{ assign; } \
	\
	head = this_cpu_ptr(event_call->perf_events); \
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
	__count, &__regs, head, __task); \
}

/*
 * Same never-called compile-time signature check as the ftrace side,
 * but against the perf probe.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void perf_test_probe_##call(void) \
{ \
	check_trace_callback_type_##call(perf_trace_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Final pass: emit the perf probes. */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */
706
707