1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
#include <linux/ftrace_event.h>

/*
 * Stage 1 of the trace events.
 *
 * Redefine the field macros so that each TRACE_EVENT() in the trace
 * header included at the bottom of this stage expands to the binary
 * record layout:
 *
 *	struct ftrace_raw_<name> {
 *		struct trace_entry	ent;
 *		<type>			<item>;
 *		<type2>			<item2>[<len>];
 *		[...]
 *		char			__data[0];
 *	};
 *
 * plus a forward declaration of the event's static ftrace_event_call
 * (defined by a later stage of this file).
 */

/* A __field() is simply a plain struct member. */
#undef __field
#define __field(type, item) type item;

/* Same layout as __field(); filter_type only matters to later stages. */
#undef __field_ext
#define __field_ext(type, item, filter_type) type item;

#undef __array
#define __array(type, item, len) type item[len];

/*
 * A dynamic array lives in the trailing __data[] area; the fixed part
 * of the record only stores a u32 "__data_loc" descriptor for it
 * (offset/length encoding is done by the get_offsets stage).
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

/* A string is a dynamic char array whose length is not yet known. */
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {				\
		struct trace_entry ent;				\
		tstruct						\
		char __data[0];					\
	};							\
	static struct ftrace_event_call event_##name

/* Strip the protecting parentheses added by TP_PROTO()/TP_ARGS()/etc. */
#undef __cpparg
#define __cpparg(arg...) arg

/*
 * TRACE_EVENT_FN() is treated exactly like TRACE_EVENT() here; the
 * extra reg/unreg callbacks are not used by this stage.
 */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print)) \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
/*
 * Stage 2 of the trace events.
 *
 * For each event create:
 *
 *	struct ftrace_data_offsets_<call> {
 *		u32	<item1>;
 *		u32	<item2>;
 *		[...]
 *	};
 *
 * with one u32 per __dynamic_array()/__string() field.  The slots are
 * filled in by ftrace_get_offsets_<call>() (a later stage) and record
 * where each piece of variable-size data sits inside the event.
 */

/* Fixed-size fields contribute nothing to the offsets struct. */
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/* Only dynamic arrays get an offset slot. */
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
/*
 * Stage 3 of the trace events: the format file.
 *
 * For each event create ftrace_format_<call>(), which prints one
 * "field:<type> <item>; offset:<o>; size:<s>;" line per field,
 * followed by the stringified print format, into a trace_seq.  This
 * is what userspace reads from the event's debugfs "format" file.
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type) __field(type, item)

#undef __array
#define __array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item));	\
	if (!ret)							\
		return 0;

/* Dynamic arrays are described via their u32 __data_loc descriptor. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
			       "offset:%u;\tsize:%u;\n",		\
			       (unsigned int)offsetof(typeof(field),	\
					__data_loc_##item),		\
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret)							\
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/* In the printed format, __entry reads as the literal "REC". */
#undef __entry
#define __entry REC

/*
 * These helpers must stay undefined here so that their names appear
 * verbatim in the stringified print fmt rather than being expanded.
 */
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
static int							\
ftrace_format_##call(struct ftrace_event_call *unused,		\
		     struct trace_seq *s)			\
{								\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;						\
								\
	tstruct;						\
								\
	trace_seq_printf(s, "\nprint fmt: " print);		\
								\
	return ret;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
/*
 * Stage 4 of the trace events: raw output.
 *
 * For each event create ftrace_raw_output_<call>(), which converts a
 * binary ftrace_raw_<call> record from the ring buffer back into text
 * using the event's TP_printk() format.
 */

/* Inside the print expression, __entry is the decoded record. */
#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/*
 * Decode a __data_loc descriptor: the low 16 bits hold the offset of
 * the dynamic data from the start of the entry.
 */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

/*
 * Render a flags/symbol value via a per-callsite static lookup table,
 * terminated by { -1, NULL }, using the scratch trace_seq 'p'.
 */
#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	/* Sanity check: the record must carry this event's id. */	\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	/* Borrow the per-cpu scratch seq for the print helpers. */	\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, #call ": " print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
286
287#undef __field_ext
288#define __field_ext(type, item, filter_type) \
289 ret = trace_define_field(event_call, #type, #item, \
290 offsetof(typeof(field), item), \
291 sizeof(field.item), \
292 is_signed_type(type), filter_type); \
293 if (ret) \
294 return ret;
295
296#undef __field
297#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
298
299#undef __array
300#define __array(type, item, len) \
301 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
302 ret = trace_define_field(event_call, #type "[" #len "]", #item, \
303 offsetof(typeof(field), item), \
304 sizeof(field.item), 0, FILTER_OTHER); \
305 if (ret) \
306 return ret;
307
308#undef __dynamic_array
309#define __dynamic_array(type, item, len) \
310 ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
311 offsetof(typeof(field), __data_loc_##item), \
312 sizeof(field.__data_loc_##item), 0, \
313 FILTER_OTHER);
314
315#undef __string
316#define __string(item, src) __dynamic_array(char, item, -1)
317
318#undef TRACE_EVENT
319#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
320static int \
321ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
322{ \
323 struct ftrace_raw_##call field; \
324 int ret; \
325 \
326 ret = trace_define_common_fields(event_call); \
327 if (ret) \
328 return ret; \
329 \
330 tstruct; \
331 \
332 return ret; \
333}
334
335#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
336
337
338
339
340
341#undef __entry
342#define __entry entry
343
344#undef __field
345#define __field(type, item)
346
347#undef __field_ext
348#define __field_ext(type, item, filter_type)
349
350#undef __array
351#define __array(type, item, len)
352
353#undef __dynamic_array
354#define __dynamic_array(type, item, len) \
355 __data_offsets->item = __data_size + \
356 offsetof(typeof(*entry), __data); \
357 __data_offsets->item |= (len * sizeof(type)) << 16; \
358 __data_size += (len) * sizeof(type);
359
360#undef __string
361#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \
362
363#undef TRACE_EVENT
364#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
365static inline int ftrace_get_offsets_##call( \
366 struct ftrace_data_offsets_##call *__data_offsets, proto) \
367{ \
368 int __data_size = 0; \
369 struct ftrace_raw_##call __maybe_unused *entry; \
370 \
371 tstruct; \
372 \
373 return __data_size; \
374}
375
376#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
377
#ifdef CONFIG_EVENT_PROFILE

/*
 * Profiling stage (CONFIG_EVENT_PROFILE only).
 *
 * For each event, forward-declare the profile probe (defined by the
 * last stage of this file) and generate the enable/disable callbacks
 * that attach/detach it to the tracepoint.  These are wired into the
 * event's ftrace_event_call via _TRACE_PROFILE_INIT().
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
								\
static void ftrace_profile_##call(proto);			\
								\
static int ftrace_profile_enable_##call(void)			\
{								\
	return register_trace_##call(ftrace_profile_##call);	\
}								\
								\
static void ftrace_profile_disable_##call(void)			\
{								\
	unregister_trace_##call(ftrace_profile_##call);		\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

/*
 * Initializers wiring the profile enable/disable callbacks (generated
 * by the profiling stage above) into the event's ftrace_event_call.
 * profile_count starts at -1 so the first enable brings it to zero.
 */
#define _TRACE_PROFILE_INIT(call)					\
	.profile_count = ATOMIC_INIT(-1),				\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
/* No profiling support: the initializer expands to nothing. */
#define _TRACE_PROFILE_INIT(call)
#endif

/* In TP_fast_assign(), __entry is the reserved ring-buffer record. */
#undef __entry
#define __entry entry

/* Fixed-size fields are written directly by the assign block. */
#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

/*
 * Copy the precomputed offset/length word (from ftrace_get_offsets)
 * into the record's __data_loc descriptor.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)       \

/* Copy the source string into its slot in the trailing __data[] area. */
#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

/*
 * Final TRACE_EVENT() stage: for each event generate
 *
 *  - ftrace_raw_event_<call>():  the tracepoint probe.  It sizes the
 *    dynamic data, reserves ring-buffer space for the fixed record
 *    plus that data, fills in the __data_loc descriptors (tstruct)
 *    and the fields (assign), then commits unless the event filter
 *    discards it;
 *  - ftrace_raw_reg/unreg_event_<call>():  attach/detach the probe;
 *  - ftrace_event_type_<call> and ftrace_raw_init_event_<call>():
 *    register the output callback and obtain the event id;
 *  - event_<call>:  the ftrace_event_call itself, placed in the
 *    "_ftrace_events" section so the trace core can find it.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
									\
static struct ftrace_event_call event_##call;				\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_##call.id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}									\
									\
static int ftrace_raw_reg_event_##call(void *ptr)			\
{									\
	int ret;							\
									\
	ret = register_trace_##call(ftrace_raw_event_##call);		\
	if (ret)							\
		pr_info("event trace: Could not activate trace point "	\
			"probe to " #call "\n");			\
	return ret;							\
}									\
									\
static void ftrace_raw_unreg_event_##call(void *ptr)			\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};									\
									\
static int ftrace_raw_init_event_##call(void)				\
{									\
	int id;								\
									\
	id = register_ftrace_event(&ftrace_event_type_##call);		\
	if (!id)							\
		return -ENODEV;						\
	event_##call.id = id;						\
	INIT_LIST_HEAD(&event_##call.fields);				\
	return 0;							\
}									\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= ftrace_raw_init_event_##call,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,			\
	.define_fields		= ftrace_define_fields_##call,		\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
#ifdef CONFIG_EVENT_PROFILE

/*
 * Last stage (CONFIG_EVENT_PROFILE only): the perf profile probe.
 *
 * ftrace_profile_##call() builds the same binary record as the raw
 * probe, but into a per-cpu scratch buffer instead of the ring buffer,
 * then hands it to perf_tp_event().  __perf_addr()/__perf_count() let
 * the event's assign block override the address/count reported.
 */

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print)		\
static void ftrace_profile_##call(proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *raw_data;							\
	int __cpu;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	/* Round up to a u64 boundary, then drop the trailing u32 pad. */\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	/* IRQs off: pin the cpu and keep the per-cpu buffer ours. */	\
	local_irq_save(irq_flags);					\
	__cpu = smp_processor_id();					\
									\
	/* NMI context uses its own buffer to avoid corruption. */	\
	if (in_nmi())							\
		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
	else								\
		raw_data = rcu_dereference(trace_profile_buf);		\
									\
	if (!raw_data)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(raw_data, __cpu);			\
									\
	/* Zero the alignment padding at the end of the record. */	\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;		\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
			     __entry_size);				\
									\
end:									\
	local_irq_restore(irq_flags);					\
									\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif		/* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT
770
771