linux/include/trace/trace_events.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct trace_event_raw_<call> {
 *      struct trace_entry              ent;
 *      <type>                          <item>;
 *      <type2>                         <item2>[<len>];
 *      [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
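
/*
 * For illustration only (a hypothetical tracepoint, not defined in this
 * file): a trace header containing
 *
 *      TRACE_EVENT(foo_bar,
 *              TP_PROTO(const char *name, int value),
 *              TP_ARGS(name, value),
 *              TP_STRUCT__entry(
 *                      __array(char,   name,   16)
 *                      __field(int,    value)
 *              ),
 *              TP_fast_assign(
 *                      memcpy(__entry->name, name, 16);
 *                      __entry->value = value;
 *              ),
 *              TP_printk("name=%s value=%d", __entry->name, __entry->value)
 *      );
 *
 * would, after this stage, produce roughly:
 *
 *      struct trace_event_raw_foo_bar {
 *              struct trace_entry      ent;
 *              char                    name[16];
 *              int                     value;
 *              char                    __data[0];
 *      };
 */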

#include <linux/trace_events.h>

#ifndef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif

#define __app__(x, y) str__##x##y
#define __app(x, y) __app__(x, y)

#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)

#define TRACE_MAKE_SYSTEM_STR()                         \
        static const char TRACE_SYSTEM_STRING[] =       \
                __stringify(TRACE_SYSTEM)

TRACE_MAKE_SYSTEM_STR();

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)                            \
        static struct trace_eval_map __used __initdata  \
        __##TRACE_SYSTEM##_##a =                        \
        {                                               \
                .system = TRACE_SYSTEM_STRING,          \
                .eval_string = #a,                      \
                .eval_value = a                         \
        };                                              \
        static struct trace_eval_map __used             \
        __attribute__((section("_ftrace_eval_map")))    \
        *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)                          \
        static struct trace_eval_map __used __initdata  \
        __##TRACE_SYSTEM##_##a =                        \
        {                                               \
                .system = TRACE_SYSTEM_STRING,          \
                .eval_string = "sizeof(" #a ")",        \
                .eval_value = sizeof(a)                 \
        };                                              \
        static struct trace_eval_map __used             \
        __attribute__((section("_ftrace_eval_map")))    \
        *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
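
/*
 * Illustrative sketch (hypothetical system and enum, shown only to explain
 * the two helpers above): with TRACE_SYSTEM set to mysys, a trace header
 * doing TRACE_DEFINE_ENUM(EM_SOME_STATE) emits a static trace_eval_map
 * initialized roughly as
 *
 *      { .system = "mysys", .eval_string = "EM_SOME_STATE",
 *        .eval_value = EM_SOME_STATE }
 *
 * plus a pointer to it placed in the "_ftrace_eval_map" section, which the
 * tracing core uses to replace the enum name in the event's print format
 * with its numeric value so user space can parse it.
 */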

/*
 * DECLARE_EVENT_CLASS can be used to add generic function handlers
 * for events; that is, when all events have the same parameters and
 * just have distinct trace points. Each tracepoint can then be defined
 * with DEFINE_EVENT, which maps the DECLARE_EVENT_CLASS to the
 * tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        DECLARE_EVENT_CLASS(name,                              \
                             PARAMS(proto),                    \
                             PARAMS(args),                     \
                             PARAMS(tstruct),                  \
                             PARAMS(assign),                   \
                             PARAMS(print));                   \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)             type    item;

#undef __field_ext
#define __field_ext(type, item, filter_type)    type    item;

#undef __field_struct
#define __field_struct(type, item)      type    item;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)     type    item;

#undef __array
#define __array(type, item, len)        type    item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)  \
        struct trace_event_raw_##name {                                 \
                struct trace_entry      ent;                            \
                tstruct                                                 \
                char                    __data[0];                      \
        };                                                              \
                                                                        \
        static struct trace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)       \
        static struct trace_event_call  __used          \
        __attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)        \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,                      \
                assign, print, reg, unreg)                              \
        TRACE_EVENT(name, PARAMS(proto), PARAMS(args),                  \
                PARAMS(tstruct), PARAMS(assign), PARAMS(print))         \

#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct,   \
                assign, print, reg, unreg)                              \
        TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond),          \
                PARAMS(tstruct), PARAMS(assign), PARAMS(print))         \

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)                                  \
        __TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)                            \
        __TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct trace_event_data_offsets_<call> {
 *      u32                             <item1>;
 *      u32                             <item2>;
 *      [...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
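
/*
 * For illustration (hypothetical event fields): a TP_STRUCT__entry() using
 *
 *      __string(name, dev->name)
 *      __dynamic_array(u32, samples, nr_samples)
 *
 * gets, after this stage, an offsets structure roughly like
 *
 *      struct trace_event_data_offsets_<call> {
 *              u32     name;
 *              u32     samples;
 *      };
 *
 * where each u32 carries the byte offset of that array's payload within the
 * event in its lower 16 bits and the payload length in its higher 16 bits.
 */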

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)        u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
        struct trace_event_data_offsets_##call {                        \
                tstruct;                                                \
        };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *      struct trace_seq *s = &iter->seq;
 *      struct trace_event_raw_<call> *field; <-- defined in stage 1
 *      struct trace_entry *entry;
 *      struct trace_seq *p = &iter->tmp_seq;
 *      int ret;
 *
 *      entry = iter->ent;
 *
 *      if (entry->type != event_<call>->event.type) {
 *              WARN_ON_ONCE(1);
 *              return TRACE_TYPE_UNHANDLED;
 *      }
 *
 *      field = (typeof(field))entry;
 *
 *      trace_seq_init(p);
 *      ret = trace_seq_printf(s, "%s: ", <call>);
 *      if (ret)
 *              ret = trace_seq_printf(s, <TP_printk> "\n");
 *      if (!ret)
 *              return TRACE_TYPE_PARTIAL_LINE;
 *
 *      return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)      \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)  \
                ((__entry->__data_loc_##field >> 16) & 0xffff)
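
/*
 * Worked example (made-up value): if __entry->__data_loc_msg == 0x000c0028,
 * the "msg" payload starts 0x0028 == 40 bytes from the beginning of the
 * entry (lower 16 bits) and is 0x000c == 12 bytes long (higher 16 bits).
 */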

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field)                                            \
        ({                                                              \
                void *__bitmask = __get_dynamic_array(field);           \
                unsigned int __bitmask_size;                            \
                __bitmask_size = __get_dynamic_array_len(field);        \
                trace_print_bitmask_seq(p, __bitmask, __bitmask_size);  \
        })

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)                       \
        ({                                                              \
                static const struct trace_print_flags __flags[] =       \
                        { flag_array, { -1, NULL }};                    \
                trace_print_flags_seq(p, delim, flag, __flags);         \
        })

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)                        \
        ({                                                              \
                static const struct trace_print_flags symbols[] =       \
                        { symbol_array, { -1, NULL }};                  \
                trace_print_symbols_seq(p, value, symbols);             \
        })
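
/*
 * Typical usage inside TP_printk() (hypothetical flag and state values,
 * shown only to illustrate the two helpers above):
 *
 *      __print_flags(__entry->flags, "|",
 *                    { FOO_F_READ, "READ" }, { FOO_F_WRITE, "WRITE" })
 *
 *      __print_symbolic(__entry->state, { 0, "IDLE" }, { 1, "RUNNING" })
 *
 * Each expands to a statement expression that formats into the iterator's
 * scratch trace_seq (p) and evaluates to the resulting string, so these
 * helpers are only usable from TP_printk().
 */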

#undef __print_flags_u64
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_flags_u64(flag, delim, flag_array...)                   \
        ({                                                              \
                static const struct trace_print_flags_u64 __flags[] =   \
                        { flag_array, { -1, NULL } };                   \
                trace_print_flags_seq_u64(p, delim, flag, __flags);     \
        })

#define __print_symbolic_u64(value, symbol_array...)                    \
        ({                                                              \
                static const struct trace_print_flags_u64 symbols[] =   \
                        { symbol_array, { -1, NULL } };                 \
                trace_print_symbols_seq_u64(p, value, symbols);         \
        })
#else
#define __print_flags_u64(flag, delim, flag_array...)                   \
                        __print_flags(flag, delim, flag_array)

#define __print_symbolic_u64(value, symbol_array...)                    \
                        __print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len)                                       \
        trace_print_hex_seq(p, buf, buf_len, false)

#undef __print_hex_str
#define __print_hex_str(buf, buf_len)                                   \
        trace_print_hex_seq(p, buf, buf_len, true)

#undef __print_array
#define __print_array(array, count, el_size)                            \
        ({                                                              \
                BUILD_BUG_ON(el_size != 1 && el_size != 2 &&            \
                             el_size != 4 && el_size != 8);             \
                trace_print_array_seq(p, array, count, el_size);        \
        })
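
/*
 * Example use in TP_printk() (hypothetical dynamic array "vals" of u32):
 *
 *      __print_array(__get_dynamic_array(vals),
 *                    __get_dynamic_array_len(vals) / sizeof(u32),
 *                    sizeof(u32))
 *
 * The element size must be 1, 2, 4 or 8 bytes; the BUILD_BUG_ON() above
 * enforces that at compile time.
 */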

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
static notrace enum print_line_t                                        \
trace_raw_output_##call(struct trace_iterator *iter, int flags,         \
                        struct trace_event *trace_event)                \
{                                                                       \
        struct trace_seq *s = &iter->seq;                               \
        struct trace_seq __maybe_unused *p = &iter->tmp_seq;            \
        struct trace_event_raw_##call *field;                           \
        int ret;                                                        \
                                                                        \
        field = (typeof(field))iter->ent;                               \
                                                                        \
        ret = trace_raw_output_prep(iter, trace_event);                 \
        if (ret != TRACE_TYPE_HANDLED)                                  \
                return ret;                                             \
                                                                        \
        trace_seq_printf(s, print);                                     \
                                                                        \
        return trace_handle_return(s);                                  \
}                                                                       \
static struct trace_event_functions trace_event_type_funcs_##call = {  \
        .trace                  = trace_raw_output_##call,              \
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)          \
static notrace enum print_line_t                                        \
trace_raw_output_##call(struct trace_iterator *iter, int flags,         \
                         struct trace_event *event)                     \
{                                                                       \
        struct trace_event_raw_##template *field;                       \
        struct trace_entry *entry;                                      \
        struct trace_seq *p = &iter->tmp_seq;                           \
                                                                        \
        entry = iter->ent;                                              \
                                                                        \
        if (entry->type != event_##call.event.type) {                   \
                WARN_ON_ONCE(1);                                        \
                return TRACE_TYPE_UNHANDLED;                            \
        }                                                               \
                                                                        \
        field = (typeof(field))entry;                                   \
                                                                        \
        trace_seq_init(p);                                              \
        return trace_output_call(iter, #call, print);                   \
}                                                                       \
static struct trace_event_functions trace_event_type_funcs_##call = {  \
        .trace                  = trace_raw_output_##call,              \
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)                            \
        ret = trace_define_field(event_call, #type, #item,              \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 is_signed_type(type), filter_type);    \
        if (ret)                                                        \
                return ret;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)                     \
        ret = trace_define_field(event_call, #type, #item,              \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 0, filter_type);                       \
        if (ret)                                                        \
                return ret;

#undef __field
#define __field(type, item)     __field_ext(type, item, FILTER_OTHER)

#undef __field_struct
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)                                        \
        do {                                                            \
                char *type_str = #type"["__stringify(len)"]";           \
                BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);                 \
                BUILD_BUG_ON(len <= 0);                                 \
                ret = trace_define_field(event_call, type_str, #item,   \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 is_signed_type(type), FILTER_OTHER);   \
                if (ret)                                                \
                        return ret;                                     \
        } while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                       \
        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
                                 offsetof(typeof(field), __data_loc_##item),   \
                                 sizeof(field.__data_loc_##item),              \
                                 is_signed_type(type), FILTER_OTHER);
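
/*
 * Side note (illustration): the type strings registered above are what
 * user space sees in the event's "format" file, e.g. a hypothetical
 * __array(char, name, 16) shows up as "char name[16]" and a
 * __dynamic_array(u32, samples, ...) as "__data_loc u32[] samples".
 */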

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)    \
static int notrace __init                                               \
trace_event_define_fields_##call(struct trace_event_call *event_call)   \
{                                                                       \
        struct trace_event_raw_##call field;                            \
        int ret;                                                        \
                                                                        \
        tstruct;                                                        \
                                                                        \
        return ret;                                                     \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                \
        __item_length = (len) * sizeof(type);                           \
        __data_offsets->item = __data_size +                            \
                               offsetof(typeof(*entry), __data);        \
        __data_offsets->item |= __item_length << 16;                    \
        __data_size += __item_length;

#undef __string
#define __string(item, src) __dynamic_array(char, item,                 \
                    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * nr_bits bits (for the typical __bitmask user, num_possible_cpus() bits).
 */
#define __bitmask_size_in_bytes_raw(nr_bits)    \
        (((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)                        \
        ((__bitmask_size_in_bytes_raw(nr_bits) +                \
          ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * nr_bits bits, padded out to the nearest long. This is what is
 * saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)                                \
        (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
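
/*
 * Worked example (illustrative numbers): with nr_bits = 130 and
 * BITS_PER_LONG = 64,
 *
 *      __bitmask_size_in_bytes_raw(130) = (130 + 7) / 8 = 17
 *      __bitmask_size_in_longs(130)     = (17 + 7) / 8  = 3
 *      __bitmask_size_in_bytes(130)     = 3 * 8         = 24
 *
 * so the bitmask is stored as three longs (24 bytes) in the ring buffer.
 */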

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,   \
                                         __bitmask_size_in_longs(nr_bits))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
static inline notrace int trace_event_get_offsets_##call(               \
        struct trace_event_data_offsets_##call *__data_offsets, proto)  \
{                                                                       \
        int __data_size = 0;                                            \
        int __maybe_unused __item_length;                               \
        struct trace_event_raw_##call __maybe_unused *entry;            \
                                                                        \
        tstruct;                                                        \
                                                                        \
        return __data_size;                                             \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct trace_event_call event_<call>;
 *
 * static void trace_event_raw_event_<call>(void *__data, proto)
 * {
 *      struct trace_event_file *trace_file = __data;
 *      struct trace_event_call *event_call = trace_file->event_call;
 *      struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
 *      unsigned long eflags = trace_file->flags;
 *      enum event_trigger_type __tt = ETT_NONE;
 *      struct ring_buffer_event *event;
 *      struct trace_event_raw_<call> *entry; <-- defined in stage 1
 *      struct ring_buffer *buffer;
 *      unsigned long irq_flags;
 *      int __data_size;
 *      int pc;
 *
 *      if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
 *              if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 *                      event_triggers_call(trace_file, NULL);
 *              if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 *                      return;
 *      }
 *
 *      local_save_flags(irq_flags);
 *      pc = preempt_count();
 *
 *      __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
 *
 *      event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *                                event_<call>->event.type,
 *                                sizeof(*entry) + __data_size,
 *                                irq_flags, pc);
 *      if (!event)
 *              return;
 *      entry   = ring_buffer_event_data(event);
 *
 *      { <assign>; }  <-- Here we assign the entries by the __field and
 *                         __array macros.
 *
 *      if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *              __tt = event_triggers_call(trace_file, entry);
 *
 *      if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
 *                   &trace_file->flags))
 *              ring_buffer_discard_commit(buffer, event);
 *      else if (!filter_check_discard(trace_file, entry, buffer, event))
 *              trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *      if (__tt)
 *              event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *      .trace                  = trace_raw_output_<call>, <-- stage 3
 * };
 *
 * static char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct trace_event_class __used event_class_<template> = {
 *      .system                 = "<system>",
 *      .define_fields          = trace_event_define_fields_<call>,
 *      .fields                 = LIST_HEAD_INIT(event_class_##call.fields),
 *      .raw_init               = trace_event_raw_init,
 *      .probe                  = trace_event_raw_event_##call,
 *      .reg                    = trace_event_reg,
 * };
 *
 * static struct trace_event_call event_<call> = {
 *      .class                  = event_class_<template>,
 *      {
 *              .tp                     = &__tracepoint_<call>,
 *      },
 *      .event                  = &ftrace_event_type_<call>,
 *      .print_fmt              = print_fmt_<call>,
 *      .flags                  = TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct trace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)                                  \
        static notrace void                                             \
        perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)                                          \
        .perf_probe             = perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                \
        __entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)                                          \
        strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
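
/*
 * Typical pairing (hypothetical field, for illustration): a header declares
 * __string(name, dev->name) in TP_STRUCT__entry() and then does
 * __assign_str(name, dev->name) in TP_fast_assign(); the string is copied
 * into the dynamic area whose size was computed by
 * trace_event_get_offsets_<call>() above.
 */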

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)                                     \
        memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_count
#define __perf_count(c) (c)

#undef __perf_task
#define __perf_task(t)  (t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
                                                                        \
static notrace void                                                     \
trace_event_raw_event_##call(void *__data, proto)                       \
{                                                                       \
        struct trace_event_file *trace_file = __data;                   \
        struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
        struct trace_event_buffer fbuffer;                              \
        struct trace_event_raw_##call *entry;                           \
        int __data_size;                                                \
                                                                        \
        if (trace_trigger_soft_disabled(trace_file))                    \
                return;                                                 \
                                                                        \
        __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
                                                                        \
        entry = trace_event_buffer_reserve(&fbuffer, trace_file,        \
                                 sizeof(*entry) + __data_size);         \
                                                                        \
        if (!entry)                                                     \
                return;                                                 \
                                                                        \
        tstruct                                                         \
                                                                        \
        { assign; }                                                     \
                                                                        \
        trace_event_buffer_commit(&fbuffer);                            \
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)                       \
static inline void ftrace_test_probe_##call(void)                       \
{                                                                       \
        check_trace_callback_type_##call(trace_event_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __print_hex_str
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
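
/*
 * For illustration: at this point a hypothetical
 *
 *      TP_printk("name=%s value=%d", __entry->name, __entry->value)
 *
 * turns into the print_fmt string
 *
 *      "\"name=%s value=%d\", REC->name, REC->value"
 *
 * because __entry is now REC and the arguments are stringified instead of
 * evaluated. This is the "print fmt:" line user space sees in the event's
 * format file.
 */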

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
_TRACE_PERF_PROTO(call, PARAMS(proto));                                 \
static char print_fmt_##call[] = print;                                 \
static struct trace_event_class __used __refdata event_class_##call = { \
        .system                 = TRACE_SYSTEM_STRING,                  \
        .define_fields          = trace_event_define_fields_##call,     \
        .fields                 = LIST_HEAD_INIT(event_class_##call.fields),\
        .raw_init               = trace_event_raw_init,                 \
        .probe                  = trace_event_raw_event_##call,         \
        .reg                    = trace_event_reg,                      \
        _TRACE_PERF_INIT(call)                                          \
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)                       \
                                                                        \
static struct trace_event_call __used event_##call = {                  \
        .class                  = &event_class_##template,              \
        {                                                               \
                .tp                     = &__tracepoint_##call,         \
        },                                                              \
        .event.funcs            = &trace_event_type_funcs_##template,   \
        .print_fmt              = print_fmt_##template,                 \
        .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
};                                                                      \
static struct trace_event_call __used                                   \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)          \
                                                                        \
static char print_fmt_##call[] = print;                                 \
                                                                        \
static struct trace_event_call __used event_##call = {                  \
        .class                  = &event_class_##template,              \
        {                                                               \
                .tp                     = &__tracepoint_##call,         \
        },                                                              \
        .event.funcs            = &trace_event_type_funcs_##call,       \
        .print_fmt              = print_fmt_##call,                     \
        .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
};                                                                      \
static struct trace_event_call __used                                   \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)