linux/include/trace/events/block.h
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>

#define RWBS_LEN        8

DECLARE_EVENT_CLASS(block_buffer,

        TP_PROTO(struct buffer_head *bh),

        TP_ARGS(bh),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  size_t,       size                    )
        ),

        TP_fast_assign(
                __entry->dev            = bh->b_bdev->bd_dev;
                __entry->sector         = bh->b_blocknr;
                __entry->size           = bh->b_size;
        ),

        TP_printk("%d,%d sector=%llu size=%zu",
                MAJOR(__entry->dev), MINOR(__entry->dev),
                (unsigned long long)__entry->sector, __entry->size
        )
);

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

        TP_PROTO(struct buffer_head *bh),

        TP_ARGS(bh)
);

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

        TP_PROTO(struct buffer_head *bh),

        TP_ARGS(bh)
);
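
/*
 * Usage sketch (illustrative, not copied verbatim from fs/buffer.c):
 * each DEFINE_EVENT above instantiates the block_buffer class as a
 * trace_<name>() helper that the buffer code fires directly, e.g.:
 *
 *      void touch_buffer(struct buffer_head *bh)
 *      {
 *              trace_block_touch_buffer(bh);
 *              mark_page_accessed(bh->b_page);
 *      }
 */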

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q.  For some reason the request was not completed and needs to be
 * put back in the queue.
 */
TRACE_EVENT(block_rq_requeue,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  unsigned int, nr_sector               )
                __array(  char,         rwbs,   RWBS_LEN        )
                __dynamic_array( char,  cmd,    1               )
        ),

        TP_fast_assign(
                __entry->dev       = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
                __entry->sector    = blk_rq_trace_sector(rq);
                __entry->nr_sector = blk_rq_trace_nr_sectors(rq);

                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                __get_str(cmd)[0] = '\0';
        ),

        TP_printk("%d,%d %s (%s) %llu + %u [%d]",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, __get_str(cmd),
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, 0)
);
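
/*
 * Consumer sketch: each TRACE_EVENT also generates a
 * register_trace_<name>() hook, so kernel code can attach a probe
 * whose signature is void (*)(void *data, <TP_PROTO args>).  A
 * minimal, hypothetical probe for this event:
 *
 *      static void probe_requeue(void *data, struct request_queue *q,
 *                                struct request *rq)
 *      {
 *              pr_info("requeue at sector %llu\n",
 *                      (unsigned long long)blk_rq_pos(rq));
 *      }
 *
 *      register_trace_block_rq_requeue(probe_requeue, NULL);
 */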

/**
 * block_rq_complete - block IO operation completed by device driver
 * @rq: block operation request
 * @error: status code
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work
 * to do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
TRACE_EVENT(block_rq_complete,

        TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),

        TP_ARGS(rq, error, nr_bytes),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  unsigned int, nr_sector               )
                __field(  int,          error                   )
                __array(  char,         rwbs,   RWBS_LEN        )
                __dynamic_array( char,  cmd,    1               )
        ),

        TP_fast_assign(
                __entry->dev       = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
                __entry->sector    = blk_rq_pos(rq);
                __entry->nr_sector = nr_bytes >> 9;
                __entry->error     = error;

                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
                __get_str(cmd)[0] = '\0';
        ),

        TP_printk("%d,%d %s (%s) %llu + %u [%d]",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, __get_str(cmd),
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->error)
);
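
/*
 * Note: nr_bytes >> 9 above converts the completed byte count into
 * 512-byte sectors.  A request may complete in several pieces, so a
 * probe sees one event per completed chunk; a hedged sketch of an
 * error-reporting probe:
 *
 *      static void probe_complete(void *data, struct request *rq,
 *                                 int error, unsigned int nr_bytes)
 *      {
 *              if (error)
 *                      pr_warn("I/O error %d after %u sectors\n",
 *                              error, nr_bytes >> 9);
 *      }
 */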

DECLARE_EVENT_CLASS(block_rq,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq),

        TP_STRUCT__entry(
                __field(  dev_t,        dev                     )
                __field(  sector_t,     sector                  )
                __field(  unsigned int, nr_sector               )
                __field(  unsigned int, bytes                   )
                __array(  char,         rwbs,   RWBS_LEN        )
                __array(  char,         comm,   TASK_COMM_LEN   )
                __dynamic_array( char,  cmd,    1               )
        ),

        TP_fast_assign(
                __entry->dev       = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
                __entry->sector    = blk_rq_trace_sector(rq);
                __entry->nr_sector = blk_rq_trace_nr_sectors(rq);
                __entry->bytes     = blk_rq_bytes(rq);

                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                __get_str(cmd)[0] = '\0';
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->rwbs, __entry->bytes, __get_str(cmd),
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q.  The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

        TP_PROTO(struct request_queue *q, struct request *rq),

        TP_ARGS(q, rq)
);
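
/*
 * block_rq_insert and block_rq_issue are commonly paired to measure
 * how long a request sat in the queue (blktrace's "I2D" interval).
 * A hypothetical probe could stash a timestamp at insert time, keyed
 * by the request pointer, and subtract it at issue time:
 *
 *      static void probe_insert(void *data, struct request_queue *q,
 *                               struct request *rq)
 *      {
 *              my_stash_insert_time(rq, ktime_get_ns());
 *      }
 *
 * where my_stash_insert_time() is a hypothetical per-request store,
 * e.g. a hash table indexed by the struct request pointer.
 */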

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device.  Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

        TP_PROTO(struct request_queue *q, struct bio *bio),

        TP_ARGS(q, bio),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: IO error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

        TP_PROTO(struct request_queue *q, struct bio *bio, int error),

        TP_ARGS(q, bio, error),

        TP_STRUCT__entry(
                __field( dev_t,         dev             )
                __field( sector_t,      sector          )
                __field( unsigned,      nr_sector       )
                __field( int,           error           )
                __array( char,          rwbs,   RWBS_LEN)
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->error          = error;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
        ),

        TP_printk("%d,%d %s %llu + %u [%d]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_bio_merge,

        TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

        TP_ARGS(q, rq, bio),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the end of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_backmerge,

        TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

        TP_ARGS(q, rq, bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @rq: request bio is being merged into
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,

        TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

        TP_ARGS(q, rq, bio)
);
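
/*
 * Illustrative contiguity checks (a sketch of the conditions the
 * merge path evaluates, not the exact elevator code): a back merge
 * is possible when the bio begins where the request ends, a front
 * merge when the bio ends where the request begins:
 *
 *      back:  blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector
 *      front: bio_end_sector(bio) == blk_rq_pos(rq)
 */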

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
TRACE_EVENT(block_bio_queue,

        TP_PROTO(struct request_queue *q, struct bio *bio),

        TP_ARGS(q, bio),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

DECLARE_EVENT_CLASS(block_get_rq,

        TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

        TP_ARGS(q, bio, rw),

        TP_STRUCT__entry(
                __field( dev_t,         dev                     )
                __field( sector_t,      sector                  )
                __field( unsigned int,  nr_sector               )
                __array( char,          rwbs,   RWBS_LEN        )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio ? bio_dev(bio) : 0;
                __entry->sector         = bio ? bio->bi_iter.bi_sector : 0;
                __entry->nr_sector      = bio ? bio_sectors(bio) : 0;
                blk_fill_rwbs(__entry->rwbs,
                              bio ? bio->bi_opf : 0, __entry->nr_sector);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu + %u [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector, __entry->comm)
);

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation (can be %NULL)
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

        TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

        TP_ARGS(q, bio, rw)
);

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation (can be %NULL)
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for a request struct to become
 * available.  This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become
 * available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

        TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

        TP_ARGS(q, bio, rw)
);

/**
 * block_plug - keep operation requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q.  Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

        TP_PROTO(struct request_queue *q),

        TP_ARGS(q),

        TP_STRUCT__entry(
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

        TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

        TP_ARGS(q, depth, explicit),

        TP_STRUCT__entry(
                __field( int,           nr_rq                   )
                __array( char,          comm,   TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->nr_rq = depth;
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);

/**
 * block_unplug - release of operation requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because device driver is scheduled to work
 * on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

        TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

        TP_ARGS(q, depth, explicit)
);
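
/*
 * Plugging in practice (a hedged sketch of the standard submitter
 * API, not the only path that fires these events): a batch of bios
 * is bracketed by blk_start_plug()/blk_finish_plug(), and the
 * plug/unplug trace pair fires around that window.  @explicit above
 * distinguishes an unplug from blk_finish_plug() from one triggered
 * when the submitting task schedules away:
 *
 *      struct blk_plug plug;
 *
 *      blk_start_plug(&plug);
 *      submit_bio(bio);
 *      blk_finish_plug(&plug);
 */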

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations,
 * such as an operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

        TP_PROTO(struct request_queue *q, struct bio *bio,
                 unsigned int new_sector),

        TP_ARGS(q, bio, new_sector),

        TP_STRUCT__entry(
                __field( dev_t,         dev                             )
                __field( sector_t,      sector                          )
                __field( sector_t,      new_sector                      )
                __array( char,          rwbs,           RWBS_LEN        )
                __array( char,          comm,           TASK_COMM_LEN   )
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->new_sector     = new_sector;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

        TP_printk("%d,%d %s %llu / %llu [%s]",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  (unsigned long long)__entry->new_sector,
                  __entry->comm)
);
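
/*
 * Split sketch (illustrative): a split typically goes through
 * bio_split(), after which the two halves are chained and the
 * tracepoint records where the remainder begins; roughly:
 *
 *      split = bio_split(bio, max_sectors, GFP_NOIO, bs);
 *      bio_chain(split, bio);
 *      trace_block_split(q, split, bio->bi_iter.bi_sector);
 */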

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

        TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
                 sector_t from),

        TP_ARGS(q, bio, dev, from),

        TP_STRUCT__entry(
                __field( dev_t,         dev             )
                __field( sector_t,      sector          )
                __field( unsigned int,  nr_sector       )
                __field( dev_t,         old_dev         )
                __field( sector_t,      old_sector      )
                __array( char,          rwbs,   RWBS_LEN)
        ),

        TP_fast_assign(
                __entry->dev            = bio_dev(bio);
                __entry->sector         = bio->bi_iter.bi_sector;
                __entry->nr_sector      = bio_sectors(bio);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
                blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
        ),

        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector,
                  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
                  (unsigned long long)__entry->old_sector)
);
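
/*
 * Remap sketch: stacking drivers (device-mapper, MD) fire this after
 * redirecting a bio, passing the device and sector the bio targeted
 * before the remap; a hedged outline:
 *
 *      dev_t    old_dev    = bio_dev(bio);
 *      sector_t old_sector = bio->bi_iter.bi_sector;
 *
 *      (redirect bio to the underlying device here)
 *      trace_block_bio_remap(q, bio, old_dev, old_sector);
 */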

/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped.  The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

        TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
                 sector_t from),

        TP_ARGS(q, rq, dev, from),

        TP_STRUCT__entry(
                __field( dev_t,         dev             )
                __field( sector_t,      sector          )
                __field( unsigned int,  nr_sector       )
                __field( dev_t,         old_dev         )
                __field( sector_t,      old_sector      )
                __field( unsigned int,  nr_bios         )
                __array( char,          rwbs,   RWBS_LEN)
        ),

        TP_fast_assign(
                __entry->dev            = disk_devt(rq->rq_disk);
                __entry->sector         = blk_rq_pos(rq);
                __entry->nr_sector      = blk_rq_sectors(rq);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
                __entry->nr_bios        = blk_rq_count_bios(rq);
                blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
        ),

        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
                  (unsigned long long)__entry->sector,
                  __entry->nr_sector,
                  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
                  (unsigned long long)__entry->old_sector, __entry->nr_bios)
);

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
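
/*
 * Note on the TRACE_HEADER_MULTI_READ guard at the top: trace event
 * headers are deliberately read more than once.  The first inclusion,
 * guarded by _TRACE_BLOCK_H, only declares the tracepoints; when a .c
 * file defines CREATE_TRACE_POINTS and includes this header,
 * <trace/define_trace.h> re-reads it with TRACE_HEADER_MULTI_READ set
 * to expand the same macros into the event implementations.
 */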