linux/include/trace/ftrace.h
   1/*
   2 * Stage 1 of the trace events.
   3 *
   4 * Override the macros in <trace/trace_events.h> to include the following:
   5 *
   6 * struct ftrace_raw_<call> {
   7 *      struct trace_entry              ent;
   8 *      <type>                          <item>;
   9 *      <type2>                         <item2>[<len>];
  10 *      [...]
  11 * };
  12 *
  13 * The <type> <item> is created by the __field(type, item) macro or
  14 * the __array(type2, item2, len) macro.
  15 * We simply do "type item;", and that will create the fields
  16 * in the structure.
  17 */
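/*
 * As an illustration (hypothetical event and field names), a tracepoint
 * defined as:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(const char *name, int value),
 *		TP_ARGS(name, value),
 *		TP_STRUCT__entry(
 *			__string(name, name)
 *			__field(int, value)
 *		),
 *		TP_fast_assign(
 *			__assign_str(name, name);
 *			__entry->value = value;
 *		),
 *		TP_printk("name=%s value=%d", __get_str(name), __entry->value)
 *	);
 *
 * would, after this stage, produce roughly:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		u32			__data_loc_name;
 *		int			value;
 *		char			__data[0];
 *	};
 *
 * (__string() becomes a u32 __data_loc_<item> via __dynamic_array(); the
 * actual string bytes live in __data[] at the end of the record.)
 */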
  18
  19#include <linux/ftrace_event.h>
  20
  21#ifndef TRACE_SYSTEM_VAR
  22#define TRACE_SYSTEM_VAR TRACE_SYSTEM
  23#endif
  24
  25#define __app__(x, y) str__##x##y
  26#define __app(x, y) __app__(x, y)
  27
  28#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)
  29
  30#define TRACE_MAKE_SYSTEM_STR()                         \
  31        static const char TRACE_SYSTEM_STRING[] =       \
  32                __stringify(TRACE_SYSTEM)
  33
  34TRACE_MAKE_SYSTEM_STR();
  35
  36#undef TRACE_DEFINE_ENUM
  37#define TRACE_DEFINE_ENUM(a)                            \
  38        static struct trace_enum_map __used __initdata  \
  39        __##TRACE_SYSTEM##_##a =                        \
  40        {                                               \
  41                .system = TRACE_SYSTEM_STRING,          \
  42                .enum_string = #a,                      \
  43                .enum_value = a                         \
  44        };                                              \
  45        static struct trace_enum_map __used             \
  46        __attribute__((section("_ftrace_enum_map")))    \
  47        *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
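/*
 * TRACE_DEFINE_ENUM() is used in trace headers to record the value of an
 * enum that appears in TP_printk() format strings, so the name used in the
 * format can be resolved to its numeric value. A sketch with a hypothetical
 * enum:
 *
 *	TRACE_DEFINE_ENUM(FOO_STATE_RUNNING);
 *
 *	TRACE_EVENT(foo_state, ...
 *		TP_printk("state=%s",
 *			__print_symbolic(__entry->state,
 *				{ FOO_STATE_RUNNING, "running" }))
 *	);
 *
 * Each use emits a struct trace_enum_map (system, string, value) and places
 * a pointer to it in the "_ftrace_enum_map" section for the trace core to
 * pick up at boot.
 */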
  48
  49/*
   50 * DECLARE_EVENT_CLASS can be used to add a generic function
   51 * handler for events that all share the same parameters and
   52 * just have distinct trace points.
  53 * Each tracepoint can be defined with DEFINE_EVENT and that
  54 * will map the DECLARE_EVENT_CLASS to the tracepoint.
  55 *
   56 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
  57 */
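/*
 * For example (hypothetical names), two events that differ only in their
 * tracepoint can share one class:
 *
 *	DECLARE_EVENT_CLASS(foo_template,
 *		TP_PROTO(int cpu),
 *		TP_ARGS(cpu),
 *		TP_STRUCT__entry(__field(int, cpu)),
 *		TP_fast_assign(__entry->cpu = cpu;),
 *		TP_printk("cpu=%d", __entry->cpu)
 *	);
 *
 *	DEFINE_EVENT(foo_template, foo_start, TP_PROTO(int cpu), TP_ARGS(cpu));
 *	DEFINE_EVENT(foo_template, foo_stop,  TP_PROTO(int cpu), TP_ARGS(cpu));
 *
 * TRACE_EVENT(name, ...) is, as defined below, just a class plus a single
 * event of the same name.
 */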
  58#undef TRACE_EVENT
  59#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
  60        DECLARE_EVENT_CLASS(name,                              \
  61                             PARAMS(proto),                    \
  62                             PARAMS(args),                     \
  63                             PARAMS(tstruct),                  \
  64                             PARAMS(assign),                   \
  65                             PARAMS(print));                   \
  66        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
  67
  68
  69#undef __field
  70#define __field(type, item)             type    item;
  71
  72#undef __field_ext
  73#define __field_ext(type, item, filter_type)    type    item;
  74
  75#undef __field_struct
  76#define __field_struct(type, item)      type    item;
  77
  78#undef __field_struct_ext
  79#define __field_struct_ext(type, item, filter_type)     type    item;
  80
  81#undef __array
  82#define __array(type, item, len)        type    item[len];
  83
  84#undef __dynamic_array
  85#define __dynamic_array(type, item, len) u32 __data_loc_##item;
  86
  87#undef __string
  88#define __string(item, src) __dynamic_array(char, item, -1)
  89
  90#undef TP_STRUCT__entry
  91#define TP_STRUCT__entry(args...) args
  92
  93#undef DECLARE_EVENT_CLASS
  94#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)  \
  95        struct ftrace_raw_##name {                                      \
  96                struct trace_entry      ent;                            \
  97                tstruct                                                 \
  98                char                    __data[0];                      \
  99        };                                                              \
 100                                                                        \
 101        static struct ftrace_event_class event_class_##name;
 102
 103#undef DEFINE_EVENT
 104#define DEFINE_EVENT(template, name, proto, args)       \
 105        static struct ftrace_event_call __used          \
 106        __attribute__((__aligned__(4))) event_##name
 107
 108#undef DEFINE_EVENT_FN
 109#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)        \
 110        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 111
 112#undef DEFINE_EVENT_PRINT
 113#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
 114        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 115
 116/* Callbacks are meaningless to ftrace. */
 117#undef TRACE_EVENT_FN
 118#define TRACE_EVENT_FN(name, proto, args, tstruct,                      \
 119                assign, print, reg, unreg)                              \
 120        TRACE_EVENT(name, PARAMS(proto), PARAMS(args),                  \
 121                PARAMS(tstruct), PARAMS(assign), PARAMS(print))         \
 122
 123#undef TRACE_EVENT_FLAGS
 124#define TRACE_EVENT_FLAGS(name, value)                                  \
 125        __TRACE_EVENT_FLAGS(name, value)
 126
 127#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 128
 129/*
 130 * Stage 2 of the trace events.
 131 *
 132 * Include the following:
 133 *
 134 * struct ftrace_data_offsets_<call> {
 135 *      u32                             <item1>;
 136 *      u32                             <item2>;
 137 *      [...]
 138 * };
 139 *
  140 * The __dynamic_array() macro will create each u32 <item>; it holds
  141 * the offset of each array from the beginning of the event.
  142 * The size of the array is also encoded in the upper 16 bits of <item>.
 143 */
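/*
 * A worked example (illustrative numbers): if the fixed part of the event
 * record, up to and including __data[0], is 16 bytes and the first dynamic
 * field is the 6 byte string "hello\0", then its u32 holds
 *
 *	16 | (6 << 16) == 0x00060010
 *
 * i.e. offset 16 from the start of the record in the low 16 bits and
 * length 6 in the high 16 bits. __get_dynamic_array() and
 * __get_dynamic_array_len() in stage 3 unpack exactly these two halves.
 */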
 144
 145#undef TRACE_DEFINE_ENUM
 146#define TRACE_DEFINE_ENUM(a)
 147
 148#undef __field
 149#define __field(type, item)
 150
 151#undef __field_ext
 152#define __field_ext(type, item, filter_type)
 153
 154#undef __field_struct
 155#define __field_struct(type, item)
 156
 157#undef __field_struct_ext
 158#define __field_struct_ext(type, item, filter_type)
 159
 160#undef __array
 161#define __array(type, item, len)
 162
 163#undef __dynamic_array
 164#define __dynamic_array(type, item, len)        u32 item;
 165
 166#undef __string
 167#define __string(item, src) __dynamic_array(char, item, -1)
 168
 169#undef DECLARE_EVENT_CLASS
 170#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
 171        struct ftrace_data_offsets_##call {                             \
 172                tstruct;                                                \
 173        };
 174
 175#undef DEFINE_EVENT
 176#define DEFINE_EVENT(template, name, proto, args)
 177
 178#undef DEFINE_EVENT_PRINT
 179#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
 180        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 181
 182#undef TRACE_EVENT_FLAGS
 183#define TRACE_EVENT_FLAGS(event, flag)
 184
 185#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 186
 187/*
 188 * Stage 3 of the trace events.
 189 *
 190 * Override the macros in <trace/trace_events.h> to include the following:
 191 *
 192 * enum print_line_t
 193 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 194 * {
 195 *      struct trace_seq *s = &iter->seq;
 196 *      struct ftrace_raw_<call> *field; <-- defined in stage 1
 197 *      struct trace_entry *entry;
 198 *      struct trace_seq *p = &iter->tmp_seq;
 199 *      int ret;
 200 *
 201 *      entry = iter->ent;
 202 *
 203 *      if (entry->type != event_<call>->event.type) {
 204 *              WARN_ON_ONCE(1);
 205 *              return TRACE_TYPE_UNHANDLED;
 206 *      }
 207 *
 208 *      field = (typeof(field))entry;
 209 *
 210 *      trace_seq_init(p);
 211 *      ret = trace_seq_printf(s, "%s: ", <call>);
 212 *      if (ret)
 213 *              ret = trace_seq_printf(s, <TP_printk> "\n");
 214 *      if (!ret)
 215 *              return TRACE_TYPE_PARTIAL_LINE;
 216 *
 217 *      return TRACE_TYPE_HANDLED;
 218 * }
 219 *
 220 * This is the method used to print the raw event to the trace
  221 * output format. Note that this is not needed if the data is read
  222 * in binary form.
 223 */
 224
 225#undef __entry
 226#define __entry field
 227
 228#undef TP_printk
 229#define TP_printk(fmt, args...) fmt "\n", args
 230
 231#undef __get_dynamic_array
 232#define __get_dynamic_array(field)      \
 233                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
 234
 235#undef __get_dynamic_array_len
 236#define __get_dynamic_array_len(field)  \
 237                ((__entry->__data_loc_##field >> 16) & 0xffff)
 238
 239#undef __get_str
 240#define __get_str(field) (char *)__get_dynamic_array(field)
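/*
 * These helpers are what TP_printk() uses to reach the dynamic data, e.g.
 * (hypothetical fields):
 *
 *	TP_printk("msg=%s len=%u",
 *		  __get_str(msg), __get_dynamic_array_len(data))
 */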
 241
 242#undef __print_flags
 243#define __print_flags(flag, delim, flag_array...)                       \
 244        ({                                                              \
 245                static const struct trace_print_flags __flags[] =       \
 246                        { flag_array, { -1, NULL }};                    \
 247                ftrace_print_flags_seq(p, delim, flag, __flags);        \
 248        })
 249
 250#undef __print_symbolic
 251#define __print_symbolic(value, symbol_array...)                        \
 252        ({                                                              \
 253                static const struct trace_print_flags symbols[] =       \
 254                        { symbol_array, { -1, NULL }};                  \
 255                ftrace_print_symbols_seq(p, value, symbols);            \
 256        })
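/*
 * Typical TP_printk() usage of the two helpers (hypothetical values):
 *
 *	TP_printk("state=%s flags=%s",
 *		  __print_symbolic(__entry->state,
 *			{ 1, "running" }, { 2, "sleeping" }),
 *		  __print_flags(__entry->flags, "|",
 *			{ 0x1, "URGENT" }, { 0x2, "RETRY" }))
 *
 * The { -1, NULL } terminator is appended by the macros above, so callers
 * only list their own value/name pairs.
 */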
 257
 258#undef __print_flags_u64
 259#undef __print_symbolic_u64
 260#if BITS_PER_LONG == 32
 261#define __print_flags_u64(flag, delim, flag_array...)                   \
 262        ({                                                              \
 263                static const struct trace_print_flags_u64 __flags[] =   \
 264                        { flag_array, { -1, NULL } };                   \
 265                ftrace_print_flags_seq_u64(p, delim, flag, __flags);    \
 266        })
 267
 268#define __print_symbolic_u64(value, symbol_array...)                    \
 269        ({                                                              \
 270                static const struct trace_print_flags_u64 symbols[] =   \
 271                        { symbol_array, { -1, NULL } };                 \
 272                ftrace_print_symbols_seq_u64(p, value, symbols);        \
 273        })
 274#else
 275#define __print_flags_u64(flag, delim, flag_array...)                   \
 276                        __print_flags(flag, delim, flag_array)
 277
 278#define __print_symbolic_u64(value, symbol_array...)                    \
 279                        __print_symbolic(value, symbol_array)
 280#endif
 281
 282#undef __print_hex
 283#define __print_hex(buf, buf_len)                                       \
 284        ftrace_print_hex_seq(p, buf, buf_len, true)
 285
 286#undef __print_hex_str
 287#define __print_hex_str(buf, buf_len)                                   \
 288        ftrace_print_hex_seq(p, buf, buf_len, false)
 289
 290#undef __print_array
 291#define __print_array(array, count, el_size)                            \
 292        ({                                                              \
 293                BUILD_BUG_ON(el_size != 1 && el_size != 2 &&            \
 294                             el_size != 4 && el_size != 8);             \
 295                ftrace_print_array_seq(p, array, count, el_size);       \
 296        })
 297
 298#undef DECLARE_EVENT_CLASS
 299#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
 300static notrace enum print_line_t                                        \
 301ftrace_raw_output_##call(struct trace_iterator *iter, int flags,        \
 302                         struct trace_event *trace_event)               \
 303{                                                                       \
 304        struct trace_seq *s = &iter->seq;                               \
 305        struct trace_seq __maybe_unused *p = &iter->tmp_seq;            \
 306        struct ftrace_raw_##call *field;                                \
 307        int ret;                                                        \
 308                                                                        \
 309        field = (typeof(field))iter->ent;                               \
 310                                                                        \
 311        ret = ftrace_raw_output_prep(iter, trace_event);                \
 312        if (ret != TRACE_TYPE_HANDLED)                                  \
 313                return ret;                                             \
 314                                                                        \
 315        trace_seq_printf(s, print);                                     \
 316                                                                        \
 317        return trace_handle_return(s);                                  \
 318}                                                                       \
 319static struct trace_event_functions ftrace_event_type_funcs_##call = {  \
 320        .trace                  = ftrace_raw_output_##call,             \
 321};
 322
 323#undef DEFINE_EVENT_PRINT
 324#define DEFINE_EVENT_PRINT(template, call, proto, args, print)          \
 325static notrace enum print_line_t                                        \
 326ftrace_raw_output_##call(struct trace_iterator *iter, int flags,        \
 327                         struct trace_event *event)                     \
 328{                                                                       \
 329        struct ftrace_raw_##template *field;                            \
 330        struct trace_entry *entry;                                      \
 331        struct trace_seq *p = &iter->tmp_seq;                           \
 332                                                                        \
 333        entry = iter->ent;                                              \
 334                                                                        \
 335        if (entry->type != event_##call.event.type) {                   \
 336                WARN_ON_ONCE(1);                                        \
 337                return TRACE_TYPE_UNHANDLED;                            \
 338        }                                                               \
 339                                                                        \
 340        field = (typeof(field))entry;                                   \
 341                                                                        \
 342        trace_seq_init(p);                                              \
 343        return ftrace_output_call(iter, #call, print);                  \
 344}                                                                       \
 345static struct trace_event_functions ftrace_event_type_funcs_##call = {  \
 346        .trace                  = ftrace_raw_output_##call,             \
 347};
 348
 349#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 350
 351#undef __field_ext
 352#define __field_ext(type, item, filter_type)                            \
 353        ret = trace_define_field(event_call, #type, #item,              \
 354                                 offsetof(typeof(field), item),         \
 355                                 sizeof(field.item),                    \
 356                                 is_signed_type(type), filter_type);    \
 357        if (ret)                                                        \
 358                return ret;
 359
 360#undef __field_struct_ext
 361#define __field_struct_ext(type, item, filter_type)                     \
 362        ret = trace_define_field(event_call, #type, #item,              \
 363                                 offsetof(typeof(field), item),         \
 364                                 sizeof(field.item),                    \
 365                                 0, filter_type);                       \
 366        if (ret)                                                        \
 367                return ret;
 368
 369#undef __field
 370#define __field(type, item)     __field_ext(type, item, FILTER_OTHER)
 371
 372#undef __field_struct
 373#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
 374
 375#undef __array
 376#define __array(type, item, len)                                        \
 377        do {                                                            \
 378                char *type_str = #type"["__stringify(len)"]";           \
 379                BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);                 \
 380                ret = trace_define_field(event_call, type_str, #item,   \
 381                                 offsetof(typeof(field), item),         \
 382                                 sizeof(field.item),                    \
 383                                 is_signed_type(type), FILTER_OTHER);   \
 384                if (ret)                                                \
 385                        return ret;                                     \
 386        } while (0);
 387
 388#undef __dynamic_array
 389#define __dynamic_array(type, item, len)                                       \
 390        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
 391                                 offsetof(typeof(field), __data_loc_##item),   \
 392                                 sizeof(field.__data_loc_##item),              \
 393                                 is_signed_type(type), FILTER_OTHER);
 394
 395#undef __string
 396#define __string(item, src) __dynamic_array(char, item, -1)
 397
 398#undef DECLARE_EVENT_CLASS
 399#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)    \
 400static int notrace __init                                               \
 401ftrace_define_fields_##call(struct ftrace_event_call *event_call)       \
 402{                                                                       \
 403        struct ftrace_raw_##call field;                                 \
 404        int ret;                                                        \
 405                                                                        \
 406        tstruct;                                                        \
 407                                                                        \
 408        return ret;                                                     \
 409}
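/*
 * For the hypothetical foo_bar event from stage 1, each __field()/__string()
 * in TP_STRUCT__entry becomes one trace_define_field() call here, roughly:
 *
 *	ret = trace_define_field(event_call, "__data_loc char[]", "name",
 *				 offsetof(typeof(field), __data_loc_name),
 *				 sizeof(field.__data_loc_name),
 *				 is_signed_type(char), FILTER_OTHER);
 *	ret = trace_define_field(event_call, "int", "value",
 *				 offsetof(typeof(field), value),
 *				 sizeof(field.value),
 *				 is_signed_type(int), FILTER_OTHER);
 *	if (ret)
 *		return ret;
 *
 * which is what populates the per-event "format" file and the filter logic.
 */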
 410
 411#undef DEFINE_EVENT
 412#define DEFINE_EVENT(template, name, proto, args)
 413
 414#undef DEFINE_EVENT_PRINT
 415#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
 416        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 417
 418#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 419
 420/*
  421 * Remember the offset of each array from the beginning of the event.
 422 */
 423
 424#undef __entry
 425#define __entry entry
 426
 427#undef __field
 428#define __field(type, item)
 429
 430#undef __field_ext
 431#define __field_ext(type, item, filter_type)
 432
 433#undef __field_struct
 434#define __field_struct(type, item)
 435
 436#undef __field_struct_ext
 437#define __field_struct_ext(type, item, filter_type)
 438
 439#undef __array
 440#define __array(type, item, len)
 441
 442#undef __dynamic_array
 443#define __dynamic_array(type, item, len)                                \
 444        __data_offsets->item = __data_size +                            \
 445                               offsetof(typeof(*entry), __data);        \
 446        __data_offsets->item |= (len * sizeof(type)) << 16;             \
 447        __data_size += (len) * sizeof(type);
 448
 449#undef __string
 450#define __string(item, src) __dynamic_array(char, item,                 \
 451                    strlen((src) ? (const char *)(src) : "(null)") + 1)
 452
 453#undef DECLARE_EVENT_CLASS
 454#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
 455static inline notrace int ftrace_get_offsets_##call(                    \
 456        struct ftrace_data_offsets_##call *__data_offsets, proto)       \
 457{                                                                       \
 458        int __data_size = 0;                                            \
 459        struct ftrace_raw_##call __maybe_unused *entry;                 \
 460                                                                        \
 461        tstruct;                                                        \
 462                                                                        \
 463        return __data_size;                                             \
 464}
 465
 466#undef DEFINE_EVENT
 467#define DEFINE_EVENT(template, name, proto, args)
 468
 469#undef DEFINE_EVENT_PRINT
 470#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
 471        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 472
 473#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 474
 475/*
 476 * Stage 4 of the trace events.
 477 *
 478 * Override the macros in <trace/trace_events.h> to include the following:
 479 *
 480 * For those macros defined with TRACE_EVENT:
 481 *
 482 * static struct ftrace_event_call event_<call>;
 483 *
 484 * static void ftrace_raw_event_<call>(void *__data, proto)
 485 * {
 486 *      struct ftrace_event_file *ftrace_file = __data;
 487 *      struct ftrace_event_call *event_call = ftrace_file->event_call;
 488 *      struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 489 *      struct ring_buffer_event *event;
 490 *      struct ftrace_raw_<call> *entry; <-- defined in stage 1
 491 *      struct ring_buffer *buffer;
 492 *      unsigned long irq_flags;
 493 *      int __data_size;
 494 *      int pc;
 495 *
 496 *      if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 497 *                   &ftrace_file->flags))
 498 *              return;
 499 *
 500 *      local_save_flags(irq_flags);
 501 *      pc = preempt_count();
 502 *
 503 *      __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 504 *
 505 *      event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 506 *                                event_<call>->event.type,
 507 *                                sizeof(*entry) + __data_size,
 508 *                                irq_flags, pc);
 509 *      if (!event)
 510 *              return;
 511 *      entry   = ring_buffer_event_data(event);
 512 *
 513 *      { <assign>; }  <-- Here we assign the entries by the __field and
 514 *                         __array macros.
 515 *
 516 *      if (!filter_current_check_discard(buffer, event_call, entry, event))
 517 *              trace_nowake_buffer_unlock_commit(buffer,
 518 *                                                 event, irq_flags, pc);
 519 * }
 520 *
 521 * static struct trace_event ftrace_event_type_<call> = {
  522 *      .trace                  = ftrace_raw_output_<call>, <-- stage 3
 523 * };
 524 *
 525 * static char print_fmt_<call>[] = <TP_printk>;
 526 *
 527 * static struct ftrace_event_class __used event_class_<template> = {
 528 *      .system                 = "<system>",
 529 *      .define_fields          = ftrace_define_fields_<call>,
 530 *      .fields                 = LIST_HEAD_INIT(event_class_##call.fields),
 531 *      .raw_init               = trace_event_raw_init,
 532 *      .probe                  = ftrace_raw_event_##call,
 533 *      .reg                    = ftrace_event_reg,
 534 * };
 535 *
 536 * static struct ftrace_event_call event_<call> = {
 537 *      .name                   = "<call>",
 538 *      .class                  = event_class_<template>,
 539 *      .event                  = &ftrace_event_type_<call>,
 540 *      .print_fmt              = print_fmt_<call>,
 541 *      .flags                  = TRACE_EVENT_FL_TRACEPOINT,
 542 * };
  543 * // it's only safe to use pointers when doing linker tricks to
  544 * // create an array.
 545 * static struct ftrace_event_call __used
 546 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 547 *
 548 */
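/*
 * The "_ftrace_events" section pointers above are gathered by the linker
 * script into one array, so the trace core can walk all built-in events
 * between the section start/end symbols, roughly (a sketch assuming the
 * usual __start_<section>/__stop_<section> naming):
 *
 *	extern struct ftrace_event_call *__start_ftrace_events[];
 *	extern struct ftrace_event_call *__stop_ftrace_events[];
 *
 *	for (iter = __start_ftrace_events; iter < __stop_ftrace_events; iter++)
 *		... register (*iter) ...
 */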
 549
 550#ifdef CONFIG_PERF_EVENTS
 551
 552#define _TRACE_PERF_PROTO(call, proto)                                  \
 553        static notrace void                                             \
 554        perf_trace_##call(void *__data, proto);
 555
 556#define _TRACE_PERF_INIT(call)                                          \
 557        .perf_probe             = perf_trace_##call,
 558
 559#else
 560#define _TRACE_PERF_PROTO(call, proto)
 561#define _TRACE_PERF_INIT(call)
 562#endif /* CONFIG_PERF_EVENTS */
 563
 564#undef __entry
 565#define __entry entry
 566
 567#undef __field
 568#define __field(type, item)
 569
 570#undef __field_struct
 571#define __field_struct(type, item)
 572
 573#undef __array
 574#define __array(type, item, len)
 575
 576#undef __dynamic_array
 577#define __dynamic_array(type, item, len)                                \
 578        __entry->__data_loc_##item = __data_offsets.item;
 579
 580#undef __string
  581#define __string(item, src) __dynamic_array(char, item, -1)
 582
 583#undef __assign_str
 584#define __assign_str(dst, src)                                          \
 585        strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
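/*
 * __string() and __assign_str() are used as a pair: the former reserves the
 * __data_loc slot in TP_STRUCT__entry, the latter copies the text in
 * TP_fast_assign. E.g. (hypothetical field and source):
 *
 *	TP_STRUCT__entry(
 *		__string(comm, p->comm)
 *	),
 *	TP_fast_assign(
 *		__assign_str(comm, p->comm);
 *	),
 */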
 586
 587#undef TP_fast_assign
 588#define TP_fast_assign(args...) args
 589
 590#undef TP_perf_assign
 591#define TP_perf_assign(args...)
 592
 593#undef DECLARE_EVENT_CLASS
 594#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
 595                                                                        \
 596static notrace void                                                     \
 597ftrace_raw_event_##call(void *__data, proto)                            \
 598{                                                                       \
 599        struct ftrace_event_file *ftrace_file = __data;                 \
 600        struct ftrace_event_call *event_call = ftrace_file->event_call; \
 601        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 602        struct ring_buffer_event *event;                                \
 603        struct ftrace_raw_##call *entry;                                \
 604        struct ring_buffer *buffer;                                     \
 605        unsigned long irq_flags;                                        \
 606        int __data_size;                                                \
 607        int pc;                                                         \
 608                                                                        \
 609        if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,                 \
 610                     &ftrace_file->flags))                              \
 611                return;                                                 \
 612                                                                        \
 613        local_save_flags(irq_flags);                                    \
 614        pc = preempt_count();                                           \
 615                                                                        \
 616        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 617                                                                        \
 618        event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,   \
 619                                 event_call->event.type,                \
 620                                 sizeof(*entry) + __data_size,          \
 621                                 irq_flags, pc);                        \
 622        if (!event)                                                     \
 623                return;                                                 \
 624        entry   = ring_buffer_event_data(event);                        \
 625                                                                        \
 626        tstruct                                                         \
 627                                                                        \
 628        { assign; }                                                     \
 629                                                                        \
 630        if (!filter_current_check_discard(buffer, event_call, entry, event)) \
 631                trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 632}
 633/*
  634 * The ftrace_test_probe is compiled out; it is only here as a build-time check
 635 * to make sure that if the tracepoint handling changes, the ftrace probe will
 636 * fail to compile unless it too is updated.
 637 */
 638
 639#undef DEFINE_EVENT
 640#define DEFINE_EVENT(template, call, proto, args)                       \
 641static inline void ftrace_test_probe_##call(void)                       \
 642{                                                                       \
 643        check_trace_callback_type_##call(ftrace_raw_event_##template);  \
 644}
 645
 646#undef DEFINE_EVENT_PRINT
 647#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
 648
 649#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 650
 651#undef __entry
 652#define __entry REC
 653
 654#undef __print_flags
 655#undef __print_symbolic
 656#undef __print_hex
 657#undef __print_hex_str
 658#undef __get_dynamic_array
 659#undef __get_dynamic_array_len
 660#undef __get_str
 661#undef __print_array
 662
 663#undef TP_printk
 664#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
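/*
 * With the hypothetical TP_printk("name=%s value=%d", __get_str(name),
 * __entry->value) from stage 1, print_fmt_<call> below becomes the single
 * string:
 *
 *	"\"name=%s value=%d\", __get_str(name), REC->value"
 *
 * i.e. the format is quoted and the argument list is stringified with
 * __entry spelled as REC, which is the form exported in the event's
 * "format" file for user space parsers.
 */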
 665
 666#undef DECLARE_EVENT_CLASS
 667#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
 668_TRACE_PERF_PROTO(call, PARAMS(proto));                                 \
 669static char print_fmt_##call[] = print;                                 \
 670static struct ftrace_event_class __used __refdata event_class_##call = { \
 671        .system                 = TRACE_SYSTEM_STRING,                  \
 672        .define_fields          = ftrace_define_fields_##call,          \
 673        .fields                 = LIST_HEAD_INIT(event_class_##call.fields),\
 674        .raw_init               = trace_event_raw_init,                 \
 675        .probe                  = ftrace_raw_event_##call,              \
 676        .reg                    = ftrace_event_reg,                     \
 677        _TRACE_PERF_INIT(call)                                          \
 678};
 679
 680#undef DEFINE_EVENT
 681#define DEFINE_EVENT(template, call, proto, args)                       \
 682                                                                        \
 683static struct ftrace_event_call __used event_##call = {                 \
 684        .name                   = #call,                                \
 685        .class                  = &event_class_##template,              \
 686        .event.funcs            = &ftrace_event_type_funcs_##template,  \
 687        .print_fmt              = print_fmt_##template,                 \
 688        .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
 689};                                                                      \
 690static struct ftrace_event_call __used                                  \
 691__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 692
 693#undef DEFINE_EVENT_PRINT
 694#define DEFINE_EVENT_PRINT(template, call, proto, args, print)          \
 695                                                                        \
 696static char print_fmt_##call[] = print;                                 \
 697                                                                        \
 698static struct ftrace_event_call __used event_##call = {                 \
 699        .name                   = #call,                                \
 700        .class                  = &event_class_##template,              \
 701        .event.funcs            = &ftrace_event_type_funcs_##call,      \
 702        .print_fmt              = print_fmt_##call,                     \
 703        .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
 704};                                                                      \
 705static struct ftrace_event_call __used                                  \
 706__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 707
 708#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 709
 710#undef TRACE_SYSTEM_VAR
 711
 712#ifdef CONFIG_PERF_EVENTS
 713
 714#undef __entry
 715#define __entry entry
 716
 717#undef __get_dynamic_array
 718#define __get_dynamic_array(field)      \
 719                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
 720
 721#undef __get_dynamic_array_len
 722#define __get_dynamic_array_len(field)  \
 723                ((__entry->__data_loc_##field >> 16) & 0xffff)
 724
 725#undef __get_str
 726#define __get_str(field) (char *)__get_dynamic_array(field)
 727
 728#undef __perf_count
 729#define __perf_count(c) __count = (c)
 730
 731#undef __perf_task
 732#define __perf_task(t) __task = (t)
 733
 734#undef TP_perf_assign
 735#define TP_perf_assign(args...) args
 736
 737#undef DECLARE_EVENT_CLASS
 738#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
 739static notrace void                                                     \
 740perf_trace_##call(void *__data, proto)                                  \
 741{                                                                       \
 742        struct ftrace_event_call *event_call = __data;                  \
 743        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 744        struct ftrace_raw_##call *entry;                                \
 745        struct pt_regs *__regs;                                         \
 746        u64 __count = 1;                                                \
 747        struct task_struct *__task = NULL;                              \
 748        struct hlist_head *head;                                        \
 749        int __entry_size;                                               \
 750        int __data_size;                                                \
 751        int rctx;                                                       \
 752                                                                        \
 753        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 754                                                                        \
 755        head = this_cpu_ptr(event_call->perf_events);                   \
 756        if (!bpf_prog_array_valid(event_call) &&                        \
 757            __builtin_constant_p(!__task) && !__task &&                 \
 758            hlist_empty(head))                                          \
 759                return;                                                 \
 760                                                                        \
 761        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 762                             sizeof(u64));                              \
 763        __entry_size -= sizeof(u32);                                    \
 764                                                                        \
 765        if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,               \
 766                      "profile buffer not large enough"))               \
 767                return;                                                 \
 768                                                                        \
 769        entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(     \
 770                __entry_size, event_call->event.type, &__regs, &rctx);  \
 771        if (!entry)                                                     \
 772                return;                                                 \
 773                                                                        \
 774        perf_fetch_caller_regs(__regs);                                 \
 775                                                                        \
 776        tstruct                                                         \
 777                                                                        \
 778        { assign; }                                                     \
 779                                                                        \
 780        perf_trace_run_bpf_submit(entry, __entry_size, rctx, 0,         \
 781                                  event_call, __count, __regs,          \
 782                                  head, __task);                        \
 783}
 784
 785/*
  786 * This part is compiled out; it is only here as a build-time check
 787 * to make sure that if the tracepoint handling changes, the
 788 * perf probe will fail to compile unless it too is updated.
 789 */
 790#undef DEFINE_EVENT
 791#define DEFINE_EVENT(template, call, proto, args)                       \
 792static inline void perf_test_probe_##call(void)                         \
 793{                                                                       \
 794        check_trace_callback_type_##call(perf_trace_##template);        \
 795}
 796
 797
 798#undef DEFINE_EVENT_PRINT
 799#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
 800        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 801
 802#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 803#endif /* CONFIG_PERF_EVENTS */
 804
 805