/* linux/include/trace/events/block.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>

#define RWBS_LEN        8

DECLARE_EVENT_CLASS(block_buffer,

        TP_PROTO(struct buffer_head *bh),

        TP_ARGS(bh),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  size_t,       size                    )
        ),

        TP_fast_assign(
                __entry->dev            = bh->b_bdev->bd_dev;
                __entry->sector         = bh->b_blocknr;
                __entry->size           = bh->b_size;
        ),

        TP_printk("%d,%d sector=%llu size=%zu",
                MAJOR(__entry->dev), MINOR(__entry->dev),
                (unsigned long long)__entry->sector, __entry->size
        )
);
/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

        TP_PROTO(struct buffer_head *bh),

        TP_ARGS(bh)
);
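
/*
 * Example (illustrative sketch, not part of the original header): for each
 * event, DEFINE_EVENT() also generates register_trace_<event>() and
 * unregister_trace_<event>() helpers, so a module that includes this
 * header can attach a probe.  The probe name and message below are
 * hypothetical:
 *
 *	static void probe_touch_buffer(void *ignore, struct buffer_head *bh)
 *	{
 *		pr_info("touch: sector %llu size %zu\n",
 *			(unsigned long long)bh->b_blocknr, bh->b_size);
 *	}
 *
 *	register_trace_block_touch_buffer(probe_touch_buffer, NULL);
 *	...
 *	unregister_trace_block_touch_buffer(probe_touch_buffer, NULL);
 */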

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

        TP_PROTO(struct buffer_head *bh),

        TP_ARGS(bh)
);

DECLARE_EVENT_CLASS(block_rq_with_error,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  unsigned int, nr_sector               )
                __field(  int,          errors                  )
                __array(  char,         rwbs,   RWBS_LEN        )
                __dynamic_array( char,  cmd,    blk_cmd_buf_len(rq)     )
        ),

        TP_fast_assign(
                __entry->dev       = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
                __entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
                                        0 : blk_rq_pos(rq);
                __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
                                        0 : blk_rq_sectors(rq);
                __entry->errors    = rq->errors;

                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                blk_dump_cmd(__get_str(cmd), rq);
        ),

        TP_printk("%d,%d %s (%s) %llu + %u [%d]",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, __get_str(cmd),
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->errors)
);
/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after the pending block IO operation request @rq
 * in queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would have accessed.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_abort,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq)
);

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q.  The request was not completed and needs to be put back in
 * the queue.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq)
);
/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, there is no additional work to do for the
 * request; if @rq->bio is non-NULL, additional work is required to
 * complete the request.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_complete,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq)
);
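
/*
 * Example (hedged sketch): a probe attached to block_rq_complete could
 * flag failed requests by inspecting rq->errors, mirroring the class
 * fields above.  probe_rq_complete is a hypothetical name:
 *
 *	static void probe_rq_complete(void *ignore, struct request_queue *q,
 *				      struct request *rq)
 *	{
 *		if (rq->errors)
 *			pr_warn("request error %d at sector %llu\n",
 *				rq->errors,
 *				(unsigned long long)blk_rq_pos(rq));
 *	}
 *
 *	register_trace_block_rq_complete(probe_rq_complete, NULL);
 */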

DECLARE_EVENT_CLASS(block_rq,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  unsigned int, nr_sector               )
                __field(  unsigned int, bytes                   )
                __array(  char,         rwbs,   RWBS_LEN        )
                __array(  char,         comm,   TASK_COMM_LEN   )
                __dynamic_array( char,  cmd,    blk_cmd_buf_len(rq)     )
        ),

        TP_fast_assign(
                __entry->dev       = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
                __entry->sector    = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
                                        0 : blk_rq_pos(rq);
                __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
                                        0 : blk_rq_sectors(rq);
                __entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
                                        blk_rq_bytes(rq) : 0;

                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                blk_dump_cmd(__get_str(cmd), rq);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, __entry->bytes, __get_str(cmd),
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q.  The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq)
);

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device.  Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

        TP_PROTO(struct request_queue *q, struct bio *bio),

        TP_ARGS(q, bio),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev ?
                                          bio->bi_bdev->bd_dev : 0;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);
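
/*
 * Example (hedged sketch): since bounce buffering adds an extra copy, a
 * probe could simply count how often it happens.  The names below are
 * hypothetical:
 *
 *	static atomic_t bounce_count = ATOMIC_INIT(0);
 *
 *	static void probe_bio_bounce(void *ignore, struct request_queue *q,
 *				     struct bio *bio)
 *	{
 *		atomic_inc(&bounce_count);
 *	}
 *
 *	register_trace_block_bio_bounce(probe_bio_bounce, NULL);
 */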

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: IO error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

        TP_PROTO(struct request_queue *q, struct bio *bio, int error),

        TP_ARGS(q, bio, error),

        TP_STRUCT__entry(
                __field( dev_t,         dev             )
                __field( sector_t,      sector          )
                __field( unsigned,      nr_sector       )
                __field( int,           error           )
                __array( char,          rwbs,   RWBS_LEN)
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->error          = error;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
        ),

        TP_printk("%d,%d %s %llu + %u [%d]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_bio_merge,

        TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

        TP_ARGS(q, rq, bio),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the end of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_backmerge,

        TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

        TP_ARGS(q, rq, bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,

        TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

        TP_ARGS(q, rq, bio)
);

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
TRACE_EVENT(block_bio_queue,

        TP_PROTO(struct request_queue *q, struct bio *bio),

        TP_ARGS(q, bio),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

DECLARE_EVENT_CLASS(block_get_rq,

        TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

        TP_ARGS(q, bio, rw),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio ? bio->bi_bdev->bd_dev : 0;
                __entry->sector         = bio ? bio->bi_sector : 0;
                __entry->nr_sector      = bio ? bio_sectors(bio) : 0;
                blk_fill_rwbs(__entry->rwbs,
                              bio ? bio->bi_rw : 0, __entry->nr_sector);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

        TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

        TP_ARGS(q, bio, rw)
);

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q,
 * the process needs to wait for a request struct to become available.
 * This tracepoint event is generated each time the process goes to
 * sleep waiting for a request struct to become available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

        TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

        TP_ARGS(q, bio, rw)
);

/**
 * block_plug - keep operation requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q.  Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve the throughput of the block device.
 */
TRACE_EVENT(block_plug,

        TP_PROTO(struct request_queue *q),

        TP_ARGS(q),

        TP_STRUCT__entry(
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

        TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

        TP_ARGS(q, depth, explicit),

        TP_STRUCT__entry(
                __field( int,           nr_rq                   )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->nr_rq = depth;
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug - release of operation requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because the device driver is scheduled to
 * work on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

        TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

        TP_ARGS(q, depth, explicit)
);
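
/*
 * Example (illustrative sketch): the probe signature matches TP_PROTO()
 * above, so a probe can distinguish explicit unplugs from those issued
 * by schedule().  probe_unplug is a hypothetical name:
 *
 *	static void probe_unplug(void *ignore, struct request_queue *q,
 *				 unsigned int depth, bool explicit)
 *	{
 *		pr_info("%s unplug: %u request(s)\n",
 *			explicit ? "explicit" : "schedule", depth);
 *	}
 *
 *	register_trace_block_unplug(probe_unplug, NULL);
 */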

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: the starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests.  The newly created @bio request starts at
 * @new_sector.  This split may be required due to hardware limitations,
 * such as an operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

        TP_PROTO(struct request_queue *q, struct bio *bio,
                 unsigned int new_sector),

        TP_ARGS(q, bio, new_sector),

        TP_STRUCT__entry(
                __field( dev_t,         dev                             )
                __field( sector_t,      sector                          )
                __field( sector_t,      new_sector                      )
                __array( char,          rwbs,           RWBS_LEN        )
                __array( char,          comm,           TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
                __entry->new_sector     = new_sector;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu / %llu [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  (unsigned long long)__entry->new_sector,
                  __entry->comm)
);
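
/*
 * Example (hedged sketch): a probe can report both halves of the split;
 * note that new_sector arrives as an unsigned int per TP_PROTO() above.
 * probe_split is a hypothetical name:
 *
 *	static void probe_split(void *ignore, struct request_queue *q,
 *				struct bio *bio, unsigned int new_sector)
 *	{
 *		pr_info("split %llu / %u\n",
 *			(unsigned long long)bio->bi_sector, new_sector);
 *	}
 *
 *	register_trace_block_split(probe_split, NULL);
 */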

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

        TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
                 sector_t from),

        TP_ARGS(q, bio, dev, from),

        TP_STRUCT__entry(
                __field( dev_t,         dev             )
                __field( sector_t,      sector          )
                __field( unsigned int,  nr_sector       )
                __field( dev_t,         old_dev         )
                __field( sector_t,      old_sector      )
                __array( char,          rwbs,   RWBS_LEN)
        ),

        TP_fast_assign(
                __entry->dev            = bio->bi_bdev->bd_dev;
                __entry->sector         = bio->bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
        ),

        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector,
                  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
                  (unsigned long long)__entry->old_sector)
);
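
/*
 * Example: a line rendered by the TP_printk() above shows the new
 * location followed by the original one, e.g. (values illustrative):
 *
 *	253,0 WS 104872 + 8 <- (8,2) 2048
 *
 * i.e. the bio now targets device 253,0 at sector 104872 for 8 sectors,
 * remapped from sector 2048 of device 8,2.
 */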

/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped.  The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

        TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
                 sector_t from),

        TP_ARGS(q, rq, dev, from),

        TP_STRUCT__entry(
                __field( dev_t,         dev             )
                __field( sector_t,      sector          )
                __field( unsigned int,  nr_sector       )
                __field( dev_t,         old_dev         )
                __field( sector_t,      old_sector      )
                __array( char,          rwbs,   RWBS_LEN)
        ),

        TP_fast_assign(
                __entry->dev            = disk_devt(rq->rq_disk);
                __entry->sector         = blk_rq_pos(rq);
                __entry->nr_sector      = blk_rq_sectors(rq);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
        ),

        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector,
                  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
                  (unsigned long long)__entry->old_sector)
);

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>