linux/include/trace/trace_events.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct trace_event_raw_<call> {
 *      struct trace_entry              ent;
 *      <type>                          <item>;
 *      <type2>                         <item2>[<len>];
 *      [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

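/*
 * Illustrative sketch only (not part of this header): assuming a
 * hypothetical tracepoint foo_bar defined elsewhere as
 *
 *      TRACE_EVENT(foo_bar,
 *              TP_PROTO(const char *name, int cnt),
 *              TP_ARGS(name, cnt),
 *              TP_STRUCT__entry(
 *                      __array(char,   name,   16)
 *                      __field(int,    cnt)
 *              ),
 *              TP_fast_assign(
 *                      strncpy(__entry->name, name, 16);
 *                      __entry->cnt = cnt;
 *              ),
 *              TP_printk("%s %d", __entry->name, __entry->cnt));
 *
 * this stage would emit roughly:
 *
 *      struct trace_event_raw_foo_bar {
 *              struct trace_entry      ent;
 *              char                    name[16];
 *              int                     cnt;
 *              char                    __data[0];
 *      };
 */
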
#include <linux/trace_events.h>

#ifndef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif

#define __app__(x, y) str__##x##y
#define __app(x, y) __app__(x, y)

#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)

#define TRACE_MAKE_SYSTEM_STR()                         \
        static const char TRACE_SYSTEM_STRING[] =       \
                __stringify(TRACE_SYSTEM)

TRACE_MAKE_SYSTEM_STR();

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)                            \
        static struct trace_eval_map __used __initdata  \
        __##TRACE_SYSTEM##_##a =                        \
        {                                               \
                .system = TRACE_SYSTEM_STRING,          \
                .eval_string = #a,                      \
                .eval_value = a                         \
        };                                              \
        static struct trace_eval_map __used             \
        __attribute__((section("_ftrace_eval_map")))    \
        *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)                          \
        static struct trace_eval_map __used __initdata  \
        __##TRACE_SYSTEM##_##a =                        \
        {                                               \
                .system = TRACE_SYSTEM_STRING,          \
                .eval_string = "sizeof(" #a ")",        \
                .eval_value = sizeof(a)                 \
        };                                              \
        static struct trace_eval_map __used             \
        __attribute__((section("_ftrace_eval_map")))    \
        *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, it is useful when all events have
 * the same parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
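
/*
 * A sketch of the class/event split (the sched_wakeup names below are
 * only an example of how a trace header might use these macros):
 *
 *      DECLARE_EVENT_CLASS(sched_wakeup_template,
 *              TP_PROTO(struct task_struct *p),
 *              TP_ARGS(p),
 *              TP_STRUCT__entry(
 *                      __array(char,   comm,   TASK_COMM_LEN)
 *                      __field(pid_t,  pid)
 *              ),
 *              TP_fast_assign(
 *                      memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 *                      __entry->pid = p->pid;
 *              ),
 *              TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid));
 *
 *      DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
 *              TP_PROTO(struct task_struct *p), TP_ARGS(p));
 *      DEFINE_EVENT(sched_wakeup_template, sched_waking,
 *              TP_PROTO(struct task_struct *p), TP_ARGS(p));
 *
 * Both events then share the one set of functions generated for the class.
 */
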
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        DECLARE_EVENT_CLASS(name,                              \
                             PARAMS(proto),                    \
                             PARAMS(args),                     \
                             PARAMS(tstruct),                  \
                             PARAMS(assign),                   \
                             PARAMS(print));                   \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)             type    item;

#undef __field_ext
#define __field_ext(type, item, filter_type)    type    item;

#undef __field_struct
#define __field_struct(type, item)      type    item;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)     type    item;

#undef __array
#define __array(type, item, len)        type    item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)  \
        struct trace_event_raw_##name {                                 \
                struct trace_entry      ent;                            \
                tstruct                                                 \
                char                    __data[0];                      \
        };                                                              \
                                                                        \
        static struct trace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)       \
        static struct trace_event_call  __used          \
        __attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)        \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,                      \
                assign, print, reg, unreg)                              \
        TRACE_EVENT(name, PARAMS(proto), PARAMS(args),                  \
                PARAMS(tstruct), PARAMS(assign), PARAMS(print))         \

#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct,   \
                assign, print, reg, unreg)                              \
        TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond),          \
                PARAMS(tstruct), PARAMS(assign), PARAMS(print))         \

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)                                  \
        __TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)                            \
        __TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct trace_event_data_offsets_<call> {
 *      u32                             <item1>;
 *      u32                             <item2>;
 *      [...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * used to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

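/*
 * Example of the encoding, with made-up numbers: if a dynamic array of
 * 12 bytes ends up starting 40 bytes past the start of the event record,
 * the stored u32 is ((12 << 16) | 40): the offset lives in the low 16
 * bits and the length in the high 16 bits. Stage 3's __get_dynamic_array()
 * and __get_dynamic_array_len() undo exactly this packing.
 */
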
#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)        u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
        struct trace_event_data_offsets_##call {                        \
                tstruct;                                                \
        };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *      struct trace_seq *s = &iter->seq;
 *      struct trace_event_raw_<call> *field; <-- defined in stage 1
 *      struct trace_entry *entry;
 *      struct trace_seq *p = &iter->tmp_seq;
 *      int ret;
 *
 *      entry = iter->ent;
 *
 *      if (entry->type != event_<call>->event.type) {
 *              WARN_ON_ONCE(1);
 *              return TRACE_TYPE_UNHANDLED;
 *      }
 *
 *      field = (typeof(field))entry;
 *
 *      trace_seq_init(p);
 *      ret = trace_seq_printf(s, "%s: ", <call>);
 *      if (ret)
 *              ret = trace_seq_printf(s, <TP_printk> "\n");
 *      if (!ret)
 *              return TRACE_TYPE_PARTIAL_LINE;
 *
 *      return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)      \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)  \
                ((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field)                                            \
        ({                                                              \
                void *__bitmask = __get_dynamic_array(field);           \
                unsigned int __bitmask_size;                            \
                __bitmask_size = __get_dynamic_array_len(field);        \
                trace_print_bitmask_seq(p, __bitmask, __bitmask_size);  \
        })

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)                       \
        ({                                                              \
                static const struct trace_print_flags __flags[] =       \
                        { flag_array, { -1, NULL }};                    \
                trace_print_flags_seq(p, delim, flag, __flags); \
        })

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)                        \
        ({                                                              \
                static const struct trace_print_flags symbols[] =       \
                        { symbol_array, { -1, NULL }};                  \
                trace_print_symbols_seq(p, value, symbols);             \
        })

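/*
 * Usage sketch (the state/flags fields below are hypothetical, not
 * defined here): inside a TP_printk() one would typically write
 *
 *      TP_printk("state=%s flags=%s",
 *              __print_symbolic(__entry->state,
 *                      { 0, "IDLE" }, { 1, "RUNNING" }, { 2, "BLOCKED" }),
 *              __print_flags(__entry->flags, "|",
 *                      { 0x1, "URGENT" }, { 0x2, "REPEAT" }))
 *
 * Each helper formats into the per-iterator tmp_seq (p above) and
 * evaluates to a pointer to the resulting string.
 */
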
#undef __print_flags_u64
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_flags_u64(flag, delim, flag_array...)                   \
        ({                                                              \
                static const struct trace_print_flags_u64 __flags[] =   \
                        { flag_array, { -1, NULL } };                   \
                trace_print_flags_seq_u64(p, delim, flag, __flags);     \
        })

#define __print_symbolic_u64(value, symbol_array...)                    \
        ({                                                              \
                static const struct trace_print_flags_u64 symbols[] =   \
                        { symbol_array, { -1, NULL } };                 \
                trace_print_symbols_seq_u64(p, value, symbols); \
        })
#else
#define __print_flags_u64(flag, delim, flag_array...)                   \
                        __print_flags(flag, delim, flag_array)

#define __print_symbolic_u64(value, symbol_array...)                    \
                        __print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len)                                       \
        trace_print_hex_seq(p, buf, buf_len, false)

#undef __print_hex_str
#define __print_hex_str(buf, buf_len)                                   \
        trace_print_hex_seq(p, buf, buf_len, true)

#undef __print_array
#define __print_array(array, count, el_size)                            \
        ({                                                              \
                BUILD_BUG_ON(el_size != 1 && el_size != 2 &&            \
                             el_size != 4 && el_size != 8);             \
                trace_print_array_seq(p, array, count, el_size);        \
        })

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
static notrace enum print_line_t                                        \
trace_raw_output_##call(struct trace_iterator *iter, int flags,         \
                        struct trace_event *trace_event)                \
{                                                                       \
        struct trace_seq *s = &iter->seq;                               \
        struct trace_seq __maybe_unused *p = &iter->tmp_seq;            \
        struct trace_event_raw_##call *field;                           \
        int ret;                                                        \
                                                                        \
        field = (typeof(field))iter->ent;                               \
                                                                        \
        ret = trace_raw_output_prep(iter, trace_event);                 \
        if (ret != TRACE_TYPE_HANDLED)                                  \
                return ret;                                             \
                                                                        \
        trace_seq_printf(s, print);                                     \
                                                                        \
        return trace_handle_return(s);                                  \
}                                                                       \
static struct trace_event_functions trace_event_type_funcs_##call = {   \
        .trace                  = trace_raw_output_##call,              \
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)          \
static notrace enum print_line_t                                        \
trace_raw_output_##call(struct trace_iterator *iter, int flags,         \
                         struct trace_event *event)                     \
{                                                                       \
        struct trace_event_raw_##template *field;                       \
        struct trace_entry *entry;                                      \
        struct trace_seq *p = &iter->tmp_seq;                           \
                                                                        \
        entry = iter->ent;                                              \
                                                                        \
        if (entry->type != event_##call.event.type) {                   \
                WARN_ON_ONCE(1);                                        \
                return TRACE_TYPE_UNHANDLED;                            \
        }                                                               \
                                                                        \
        field = (typeof(field))entry;                                   \
                                                                        \
        trace_seq_init(p);                                              \
        return trace_output_call(iter, #call, print);                   \
}                                                                       \
static struct trace_event_functions trace_event_type_funcs_##call = {   \
        .trace                  = trace_raw_output_##call,              \
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)                            \
        ret = trace_define_field(event_call, #type, #item,              \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 is_signed_type(type), filter_type);    \
        if (ret)                                                        \
                return ret;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)                     \
        ret = trace_define_field(event_call, #type, #item,              \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 0, filter_type);                       \
        if (ret)                                                        \
                return ret;

#undef __field
#define __field(type, item)     __field_ext(type, item, FILTER_OTHER)

#undef __field_struct
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)                                        \
        do {                                                            \
                char *type_str = #type"["__stringify(len)"]";           \
                BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);                 \
                ret = trace_define_field(event_call, type_str, #item,   \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 is_signed_type(type), FILTER_OTHER);   \
                if (ret)                                                \
                        return ret;                                     \
        } while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                       \
        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
                                 offsetof(typeof(field), __data_loc_##item),   \
                                 sizeof(field.__data_loc_##item),              \
                                 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)    \
static int notrace __init                                               \
trace_event_define_fields_##call(struct trace_event_call *event_call)   \
{                                                                       \
        struct trace_event_raw_##call field;                            \
        int ret;                                                        \
                                                                        \
        tstruct;                                                        \
                                                                        \
        return ret;                                                     \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                \
        __item_length = (len) * sizeof(type);                           \
        __data_offsets->item = __data_size +                            \
                               offsetof(typeof(*entry), __data);        \
        __data_offsets->item |= __item_length << 16;                    \
        __data_size += __item_length;

#undef __string
#define __string(item, src) __dynamic_array(char, item,                 \
                    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * nr_bits bits (typically num_possible_cpus() bits for a cpumask).
 */
#define __bitmask_size_in_bytes_raw(nr_bits)    \
        (((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)                        \
        ((__bitmask_size_in_bytes_raw(nr_bits) +                \
          ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * nr_bits bits, padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)                                \
        (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))

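/*
 * Worked example, assuming 64-bit longs: for nr_bits = 20,
 * __bitmask_size_in_bytes_raw(20) = (20 + 7) / 8 = 3 bytes,
 * __bitmask_size_in_longs(20)     = (3 + 7) / 8  = 1 long,
 * __bitmask_size_in_bytes(20)     = 1 * 8        = 8 bytes recorded.
 */
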
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,   \
                                         __bitmask_size_in_longs(nr_bits))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
static inline notrace int trace_event_get_offsets_##call(               \
        struct trace_event_data_offsets_##call *__data_offsets, proto)  \
{                                                                       \
        int __data_size = 0;                                            \
        int __maybe_unused __item_length;                               \
        struct trace_event_raw_##call __maybe_unused *entry;            \
                                                                        \
        tstruct;                                                        \
                                                                        \
        return __data_size;                                             \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct trace_event_call event_<call>;
 *
 * static void trace_event_raw_event_<call>(void *__data, proto)
 * {
 *      struct trace_event_file *trace_file = __data;
 *      struct trace_event_call *event_call = trace_file->event_call;
 *      struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
 *      unsigned long eflags = trace_file->flags;
 *      enum event_trigger_type __tt = ETT_NONE;
 *      struct ring_buffer_event *event;
 *      struct trace_event_raw_<call> *entry; <-- defined in stage 1
 *      struct ring_buffer *buffer;
 *      unsigned long irq_flags;
 *      int __data_size;
 *      int pc;
 *
 *      if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
 *              if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 *                      event_triggers_call(trace_file, NULL);
 *              if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 *                      return;
 *      }
 *
 *      local_save_flags(irq_flags);
 *      pc = preempt_count();
 *
 *      __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
 *
 *      event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *                                event_<call>->event.type,
 *                                sizeof(*entry) + __data_size,
 *                                irq_flags, pc);
 *      if (!event)
 *              return;
 *      entry   = ring_buffer_event_data(event);
 *
 *      { <assign>; }  <-- Here we assign the entries by the __field and
 *                         __array macros.
 *
 *      if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *              __tt = event_triggers_call(trace_file, entry);
 *
 *      if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
 *                   &trace_file->flags))
 *              ring_buffer_discard_commit(buffer, event);
 *      else if (!filter_check_discard(trace_file, entry, buffer, event))
 *              trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *      if (__tt)
 *              event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *      .trace                  = trace_raw_output_<call>, <-- stage 3
 * };
 *
 * static char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct trace_event_class __used event_class_<template> = {
 *      .system                 = "<system>",
 *      .define_fields          = trace_event_define_fields_<call>,
 *      .fields                 = LIST_HEAD_INIT(event_class_##call.fields),
 *      .raw_init               = trace_event_raw_init,
 *      .probe                  = trace_event_raw_event_##call,
 *      .reg                    = trace_event_reg,
 * };
 *
 * static struct trace_event_call event_<call> = {
 *      .class                  = event_class_<template>,
 *      {
 *              .tp                     = &__tracepoint_<call>,
 *      },
 *      .event                  = &ftrace_event_type_<call>,
 *      .print_fmt              = print_fmt_<call>,
 *      .flags                  = TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct trace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)                                  \
        static notrace void                                             \
        perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)                                          \
        .perf_probe             = perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                \
        __entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)                                          \
        strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)                                     \
        memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

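/*
 * Sketch of how the assign helpers above are typically combined in a
 * TP_fast_assign() (the cpu/name/cpus fields and the dev/mask arguments
 * are illustrative only, not defined in this file):
 *
 *      TP_fast_assign(
 *              __entry->cpu = cpu;
 *              __assign_str(name, dev_name(dev));
 *              __assign_bitmask(cpus, cpumask_bits(mask),
 *                               num_possible_cpus());
 *      )
 *
 * __assign_str() copies into the buffer space reserved by __string(),
 * and __assign_bitmask() copies __bitmask_size_in_bytes(nr_bits) bytes
 * into the space reserved by __bitmask().
 */
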
#undef __perf_count
#define __perf_count(c) (c)

#undef __perf_task
#define __perf_task(t)  (t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
                                                                        \
static notrace void                                                     \
trace_event_raw_event_##call(void *__data, proto)                       \
{                                                                       \
        struct trace_event_file *trace_file = __data;                   \
        struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
        struct trace_event_buffer fbuffer;                              \
        struct trace_event_raw_##call *entry;                           \
        int __data_size;                                                \
                                                                        \
        if (trace_trigger_soft_disabled(trace_file))                    \
                return;                                                 \
                                                                        \
        __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
                                                                        \
        entry = trace_event_buffer_reserve(&fbuffer, trace_file,        \
                                 sizeof(*entry) + __data_size);         \
                                                                        \
        if (!entry)                                                     \
                return;                                                 \
                                                                        \
        tstruct                                                         \
                                                                        \
        { assign; }                                                     \
                                                                        \
        trace_event_buffer_commit(&fbuffer);                            \
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)                       \
static inline void ftrace_test_probe_##call(void)                       \
{                                                                       \
        check_trace_callback_type_##call(trace_event_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __print_hex_str
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
_TRACE_PERF_PROTO(call, PARAMS(proto));                                 \
static char print_fmt_##call[] = print;                                 \
static struct trace_event_class __used __refdata event_class_##call = { \
        .system                 = TRACE_SYSTEM_STRING,                  \
        .define_fields          = trace_event_define_fields_##call,     \
        .fields                 = LIST_HEAD_INIT(event_class_##call.fields),\
        .raw_init               = trace_event_raw_init,                 \
        .probe                  = trace_event_raw_event_##call,         \
        .reg                    = trace_event_reg,                      \
        _TRACE_PERF_INIT(call)                                          \
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)                       \
                                                                        \
static struct trace_event_call __used event_##call = {                  \
        .class                  = &event_class_##template,              \
        {                                                               \
                .tp                     = &__tracepoint_##call,         \
        },                                                              \
        .event.funcs            = &trace_event_type_funcs_##template,   \
        .print_fmt              = print_fmt_##template,                 \
        .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
};                                                                      \
static struct trace_event_call __used                                   \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)          \
                                                                        \
static char print_fmt_##call[] = print;                                 \
                                                                        \
static struct trace_event_call __used event_##call = {                  \
        .class                  = &event_class_##template,              \
        {                                                               \
                .tp                     = &__tracepoint_##call,         \
        },                                                              \
        .event.funcs            = &trace_event_type_funcs_##call,       \
        .print_fmt              = print_fmt_##call,                     \
        .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
};                                                                      \
static struct trace_event_call __used                                   \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)