linux/include/trace/events/block.h
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>

#define RWBS_LEN        8

DECLARE_EVENT_CLASS(block_buffer,

        TP_PROTO(struct buffer_head *bh),

        TP_ARGS(bh),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  size_t,       size                    )
        ),

        TP_fast_assign(
                __entry->dev            = bh->b_bdev->bd_dev;
                __entry->sector         = bh->b_blocknr;
                __entry->size           = bh->b_size;
        ),

        TP_printk("%d,%d sector=%llu size=%zu",
                MAJOR(__entry->dev), MINOR(__entry->dev),
                (unsigned long long)__entry->sector, __entry->size
        )
);

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

        TP_PROTO(struct buffer_head *bh),

        TP_ARGS(bh)
);

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

        TP_PROTO(struct buffer_head *bh),

        TP_ARGS(bh)
);

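/*
 * Illustrative sketch, not part of the tracepoint definitions: how
 * built-in code (blktrace-style) might hook one of the buffer events
 * above.  A probe takes a private-data pointer followed by the TP_PROTO
 * arguments; the guard macro and probe name here are hypothetical.
 */
#ifdef BLOCK_TRACE_EXAMPLES            /* hypothetical guard, never set */
static void probe_dirty_buffer(void *ignore, struct buffer_head *bh)
{
        /* tracepoint context: keep it cheap and never sleep */
        pr_debug("dirty: dev %d,%d block %llu size %zu\n",
                 MAJOR(bh->b_bdev->bd_dev), MINOR(bh->b_bdev->bd_dev),
                 (unsigned long long)bh->b_blocknr, bh->b_size);
}
/* attach with: register_trace_block_dirty_buffer(probe_dirty_buffer, NULL); */
#endif
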
/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q.  For some reason the request was not completed and needs to be
 * put back in the queue.
 */
TRACE_EVENT(block_rq_requeue,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  unsigned int, nr_sector               )
                __array(  char,         rwbs,   RWBS_LEN        )
                __dynamic_array( char,  cmd,    1               )
        ),

        TP_fast_assign(
                __entry->dev       = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
                __entry->sector    = blk_rq_trace_sector(rq);
                __entry->nr_sector = blk_rq_trace_nr_sectors(rq);

                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                __get_str(cmd)[0] = '\0';
        ),

        TP_printk("%d,%d %s (%s) %llu + %u [%d]",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, __get_str(cmd),
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, 0)
);

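/*
 * Illustrative sketch: a hypothetical probe counting requeues, e.g. to
 * spot a driver that repeatedly fails to finish requests.  The probe and
 * counter names are assumptions, not kernel API.
 */
#ifdef BLOCK_TRACE_EXAMPLES
static atomic_t nr_requeues = ATOMIC_INIT(0);

static void probe_rq_requeue(void *ignore, struct request_queue *q,
                             struct request *rq)
{
        atomic_inc(&nr_requeues);
}
/* attach with: register_trace_block_rq_requeue(probe_rq_requeue, NULL); */
#endif
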
/**
 * block_rq_complete - block IO operation completed by device driver
 * @rq: block operations request
 * @error: status code
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work
 * to do for the request.  If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
TRACE_EVENT(block_rq_complete,

        TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),

        TP_ARGS(rq, error, nr_bytes),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  unsigned int, nr_sector               )
                __field(  int,          error                   )
                __array(  char,         rwbs,   RWBS_LEN        )
                __dynamic_array( char,  cmd,    1               )
        ),

        TP_fast_assign(
                __entry->dev       = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
                __entry->sector    = blk_rq_pos(rq);
                __entry->nr_sector = nr_bytes >> 9;
                __entry->error     = error;

                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
                __get_str(cmd)[0] = '\0';
        ),

        TP_printk("%d,%d %s (%s) %llu + %u [%d]",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, __get_str(cmd),
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->error)
);

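/*
 * Illustrative sketch: a hypothetical probe that reports failed
 * completions; @error carries the negative errno recorded in the event.
 */
#ifdef BLOCK_TRACE_EXAMPLES
static void probe_rq_complete(void *ignore, struct request *rq, int error,
                              unsigned int nr_bytes)
{
        if (error)
                pr_warn("request failed: error %d after %u bytes\n",
                        error, nr_bytes);
}
/* attach with: register_trace_block_rq_complete(probe_rq_complete, NULL); */
#endif
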
DECLARE_EVENT_CLASS(block_rq,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  unsigned int, nr_sector               )
                __field(  unsigned int, bytes                   )
                __array(  char,         rwbs,   RWBS_LEN        )
                __array(  char,         comm,   TASK_COMM_LEN   )
                __dynamic_array( char,  cmd,    1               )
        ),

        TP_fast_assign(
                __entry->dev       = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
                __entry->sector    = blk_rq_trace_sector(rq);
                __entry->nr_sector = blk_rq_trace_nr_sectors(rq);
                __entry->bytes     = blk_rq_bytes(rq);

                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                __get_str(cmd)[0] = '\0';
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, __entry->bytes, __get_str(cmd),
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q.  The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq)
);

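/*
 * Illustrative sketch: block_rq_issue pairs with block_rq_complete for
 * latency measurement.  Real tools key timestamps on the request itself;
 * this hypothetical probe only stamps the most recent issue time, and is
 * deliberately racy for brevity.
 */
#ifdef BLOCK_TRACE_EXAMPLES
static u64 last_issue_ns;

static void probe_rq_issue(void *ignore, struct request_queue *q,
                           struct request *rq)
{
        last_issue_ns = ktime_get_ns();
}
/* attach with: register_trace_block_rq_issue(probe_rq_issue, NULL); */
#endif
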
/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device.  Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

        TP_PROTO(struct request_queue *q, struct bio *bio),

        TP_ARGS(q, bio),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

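/*
 * Illustrative sketch: since every bounce costs an extra copy, a
 * hypothetical probe logging block_bio_bounce is a cheap way to confirm
 * whether a workload is paying that price.
 */
#ifdef BLOCK_TRACE_EXAMPLES
static void probe_bio_bounce(void *ignore, struct request_queue *q,
                             struct bio *bio)
{
        pr_debug("bounce on %d,%d: %u sectors\n",
                 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), bio_sectors(bio));
}
/* attach with: register_trace_block_bio_bounce(probe_bio_bounce, NULL); */
#endif
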
/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

        TP_PROTO(struct request_queue *q, struct bio *bio),

        TP_ARGS(q, bio),

        TP_STRUCT__entry(
                __field( dev_t,         dev             )
                __field( sector_t,      sector          )
                __field( unsigned,      nr_sector       )
                __field( int,           error           )
                __array( char,          rwbs,   RWBS_LEN)
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->error          = blk_status_to_errno(bio->bi_status);
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
        ),

        TP_printk("%d,%d %s %llu + %u [%d]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_bio_merge,

        TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

        TP_ARGS(q, rq, bio),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_backmerge,

        TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

        TP_ARGS(q, rq, bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,

        TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

        TP_ARGS(q, rq, bio)
);

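/*
 * Illustrative sketch: back- and front-merge events share the
 * block_bio_merge class, so one probe shape serves both.  Counter names
 * are hypothetical.
 */
#ifdef BLOCK_TRACE_EXAMPLES
static atomic_t nr_backmerges = ATOMIC_INIT(0);
static atomic_t nr_frontmerges = ATOMIC_INIT(0);

static void probe_backmerge(void *ignore, struct request_queue *q,
                            struct request *rq, struct bio *bio)
{
        atomic_inc(&nr_backmerges);
}

static void probe_frontmerge(void *ignore, struct request_queue *q,
                             struct request *rq, struct bio *bio)
{
        atomic_inc(&nr_frontmerges);
}
/*
 * attach with:
 *   register_trace_block_bio_backmerge(probe_backmerge, NULL);
 *   register_trace_block_bio_frontmerge(probe_frontmerge, NULL);
 */
#endif
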
/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
TRACE_EVENT(block_bio_queue,

        TP_PROTO(struct request_queue *q, struct bio *bio),

        TP_ARGS(q, bio),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

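/*
 * Illustrative sketch: block_bio_queue fires once per submitted bio,
 * before any merging, which makes it a reasonable hook for per-process
 * submission accounting.  This hypothetical probe just logs the submitter.
 */
#ifdef BLOCK_TRACE_EXAMPLES
static void probe_bio_queue(void *ignore, struct request_queue *q,
                            struct bio *bio)
{
        pr_debug("%s queued %u sectors at %llu\n", current->comm,
                 bio_sectors(bio),
                 (unsigned long long)bio->bi_iter.bi_sector);
}
/* attach with: register_trace_block_bio_queue(probe_bio_queue, NULL); */
#endif
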
DECLARE_EVENT_CLASS(block_get_rq,

        TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

        TP_ARGS(q, bio, rw),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio ? bio_dev(bio) : 0;
                __entry->sector         = bio ? bio->bi_iter.bi_sector : 0;
                __entry->nr_sector      = bio ? bio_sectors(bio) : 0;
                blk_fill_rwbs(__entry->rwbs,
                              bio ? bio->bi_opf : 0, __entry->nr_sector);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation (can be %NULL)
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

        TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

        TP_ARGS(q, bio, rw)
);

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation (can be %NULL)
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for a request struct to become
 * available.  This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become
 * available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

        TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

        TP_ARGS(q, bio, rw)
);

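/*
 * Illustrative sketch: block_sleeprq firing at any significant rate means
 * request allocation is a bottleneck.  Note @bio may be %NULL here, which
 * the event class's TP_fast_assign above already guards against.
 */
#ifdef BLOCK_TRACE_EXAMPLES
static void probe_sleeprq(void *ignore, struct request_queue *q,
                          struct bio *bio, int rw)
{
        pr_debug("%s sleeping for a request struct (rw=%d)\n",
                 current->comm, rw);
}
/* attach with: register_trace_block_sleeprq(probe_sleeprq, NULL); */
#endif
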
/**
 * block_plug - keep operation requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q.  Do not allow block operation requests
 * to be sent to the device driver.  Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

        TP_PROTO(struct request_queue *q),

        TP_ARGS(q),

        TP_STRUCT__entry(
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

        TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

        TP_ARGS(q, depth, explicit),

        TP_STRUCT__entry(
                __field( int,           nr_rq                   )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->nr_rq = depth;
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug - release of operation requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because device driver is scheduled to work
 * on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

        TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

        TP_ARGS(q, depth, explicit)
);

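/*
 * Illustrative sketch: a hypothetical block_unplug probe shows how many
 * requests each plug batched (@depth) and whether the flush was explicit
 * or driven by schedule().
 */
#ifdef BLOCK_TRACE_EXAMPLES
static void probe_unplug(void *ignore, struct request_queue *q,
                         unsigned int depth, bool explicit)
{
        pr_debug("%s unplugged %u request(s) (%s)\n", current->comm, depth,
                 explicit ? "explicit" : "schedule");
}
/* attach with: register_trace_block_unplug(probe_unplug, NULL); */
#endif
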
/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests.  The newly created @bio request starts at
 * @new_sector.  This split may be required due to hardware limitations
 * such as the operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

        TP_PROTO(struct request_queue *q, struct bio *bio,
                 unsigned int new_sector),

        TP_ARGS(q, bio, new_sector),

        TP_STRUCT__entry(
                __field( dev_t,         dev                             )
                __field( sector_t,      sector                          )
                __field( sector_t,      new_sector                      )
                __array( char,          rwbs,           RWBS_LEN        )
                __array( char,          comm,           TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->new_sector     = new_sector;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu / %llu [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  (unsigned long long)__entry->new_sector,
                  __entry->comm)
);

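/*
 * Illustrative sketch: frequent block_split events suggest bios are being
 * sized against device limits (e.g. RAID chunk boundaries).  Hypothetical
 * probe below.
 */
#ifdef BLOCK_TRACE_EXAMPLES
static void probe_split(void *ignore, struct request_queue *q,
                        struct bio *bio, unsigned int new_sector)
{
        pr_debug("split at %llu, new bio starts at %u\n",
                 (unsigned long long)bio->bi_iter.bi_sector, new_sector);
}
/* attach with: register_trace_block_split(probe_split, NULL); */
#endif
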
/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

        TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
                 sector_t from),

        TP_ARGS(q, bio, dev, from),

        TP_STRUCT__entry(
                __field( dev_t,         dev             )
                __field( sector_t,      sector          )
                __field( unsigned int,  nr_sector       )
                __field( dev_t,         old_dev         )
                __field( sector_t,      old_sector      )
                __array( char,          rwbs,   RWBS_LEN)
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
        ),

        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector,
                  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
                  (unsigned long long)__entry->old_sector)
);

/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped.  The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

        TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
                 sector_t from),

        TP_ARGS(q, rq, dev, from),

        TP_STRUCT__entry(
                __field( dev_t,         dev             )
                __field( sector_t,      sector          )
                __field( unsigned int,  nr_sector       )
                __field( dev_t,         old_dev         )
                __field( sector_t,      old_sector      )
                __field( unsigned int,  nr_bios         )
                __array( char,          rwbs,   RWBS_LEN)
        ),

        TP_fast_assign(
                __entry->dev            = disk_devt(rq->rq_disk);
                __entry->sector         = blk_rq_pos(rq);
                __entry->nr_sector      = blk_rq_sectors(rq);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
                __entry->nr_bios        = blk_rq_count_bios(rq);
                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
        ),

        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector,
                  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
                  (unsigned long long)__entry->old_sector, __entry->nr_bios)
);

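/*
 * Illustrative sketch: the remap events let a tracer follow IO down a
 * stacked (dm/md) device tree; @dev and @from give the pre-remap
 * location.  Hypothetical probe:
 */
#ifdef BLOCK_TRACE_EXAMPLES
static void probe_rq_remap(void *ignore, struct request_queue *q,
                           struct request *rq, dev_t dev, sector_t from)
{
        pr_debug("rq remapped from %d,%d sector %llu\n",
                 MAJOR(dev), MINOR(dev), (unsigned long long)from);
}
/* attach with: register_trace_block_rq_remap(probe_rq_remap, NULL); */
#endif
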
#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>