/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
#include <uapi/linux/ioprio.h>

#define RWBS_LEN	10

#define IOPRIO_CLASS_STRINGS \
	{ IOPRIO_CLASS_NONE,	"none" }, \
	{ IOPRIO_CLASS_RT,	"rt" }, \
	{ IOPRIO_CLASS_BE,	"be" }, \
	{ IOPRIO_CLASS_IDLE,	"idle" }, \
	{ IOPRIO_CLASS_INVALID,	"invalid" }

#ifdef CONFIG_BUFFER_HEAD
DECLARE_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( size_t, size )
	),

	TP_fast_assign(
		__entry->dev = bh->b_bdev->bd_dev;
		__entry->sector = bh->b_blocknr;
		__entry->size = bh->b_size;
	),

	TP_printk("%d,%d sector=%llu size=%zu",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->sector, __entry->size
	)
);

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
);
#endif /* CONFIG_BUFFER_HEAD */

/**
 * block_rq_requeue - place block IO request back on a queue
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back on its request
 * queue. For some reason the request was not completed and needs to be
 * put back in the queue.
 */
TRACE_EVENT(block_rq_requeue,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned short, ioprio )
		__array( char, rwbs, RWBS_LEN )
		__dynamic_array( char, cmd, 1 )
	),

	TP_fast_assign(
		__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
		__entry->sector = blk_rq_trace_sector(rq);
		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
		__entry->ioprio = rq->ioprio;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
		__get_str(cmd)[0] = '\0';
	),

	TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
				   IOPRIO_CLASS_STRINGS),
		  IOPRIO_PRIO_HINT(__entry->ioprio),
		  IOPRIO_PRIO_LEVEL(__entry->ioprio), 0)
);
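
/*
 * Illustrative sketch (not part of this header): TRACE_EVENT() above
 * generates a trace_block_rq_requeue() stub that block core code calls
 * at the requeue site, roughly:
 *
 *	static void requeue_example(struct request *rq)	// hypothetical caller
 *	{
 *		trace_block_rq_requeue(rq);
 *	}
 *
 * The call compiles down to a static-branch no-op unless the event is
 * enabled, so it is safe in hot paths.
 */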

DECLARE_EVENT_CLASS(block_rq_completion,

	TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),

	TP_ARGS(rq, error, nr_bytes),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( int, error )
		__field( unsigned short, ioprio )
		__array( char, rwbs, RWBS_LEN )
		__dynamic_array( char, cmd, 1 )
	),

	TP_fast_assign(
		__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
		__entry->sector = blk_rq_pos(rq);
		__entry->nr_sector = nr_bytes >> 9;
		__entry->error = blk_status_to_errno(error);
		__entry->ioprio = rq->ioprio;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
		__get_str(cmd)[0] = '\0';
	),

	TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __get_str(cmd),
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
				   IOPRIO_CLASS_STRINGS),
		  IOPRIO_PRIO_HINT(__entry->ioprio),
		  IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->error)
);

/**
 * block_rq_complete - block IO operation completed by device driver
 * @rq: block operations request
 * @error: status code
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
DEFINE_EVENT(block_rq_completion, block_rq_complete,

	TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),

	TP_ARGS(rq, error, nr_bytes)
);

/**
 * block_rq_error - block IO operation error reported by device driver
 * @rq: block operations request
 * @error: status code
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_error tracepoint event indicates that some portion
 * of the operation request has failed as reported by the device driver.
 */
DEFINE_EVENT(block_rq_completion, block_rq_error,

	TP_PROTO(struct request *rq, blk_status_t error, unsigned int nr_bytes),

	TP_ARGS(rq, error, nr_bytes)
);

DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, bytes )
		__field( unsigned short, ioprio )
		__array( char, rwbs, RWBS_LEN )
		__array( char, comm, TASK_COMM_LEN )
		__dynamic_array( char, cmd, 1 )
	),

	TP_fast_assign(
		__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
		__entry->sector = blk_rq_trace_sector(rq);
		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
		__entry->bytes = blk_rq_bytes(rq);
		__entry->ioprio = rq->ioprio;

		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
		__get_str(cmd)[0] = '\0';
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u %s,%u,%u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->rwbs, __entry->bytes, __get_str(cmd),
		  (unsigned long long)__entry->sector, __entry->nr_sector,
		  __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
				   IOPRIO_CLASS_STRINGS),
		  IOPRIO_PRIO_HINT(__entry->ioprio),
		  IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->comm)
);
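
/*
 * Usage sketch (tracefs paths assumed, sample output illustrative):
 * every DEFINE_EVENT() below stamps out an event sharing the block_rq
 * layout above, each individually controllable from userspace:
 *
 *	# echo 1 > /sys/kernel/tracing/events/block/block_rq_issue/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 *
 * A record then prints in the TP_printk format above, e.g. something
 * like "8,0 WS 4096 () 2048 + 8 be,0,4 [fio]".
 */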

/**
 * block_rq_insert - insert block operation request into queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into its request queue. The fields in the operation request @rq
 * struct can be examined to determine which device and sectors the
 * pending operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @rq: block IO operation request
 *
 * Called when block operation request @rq is sent from its queue to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);

/**
 * block_rq_merge - merge request with another one in the elevator
 * @rq: block IO operation request
 *
 * Called when block operation request @rq is merged with another
 * request queued in the elevator.
 */
DEFINE_EVENT(block_rq, block_rq_merge,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);

/**
 * block_io_start - insert a request for execution
 * @rq: block IO operation request
 *
 * Called when block operation request @rq is queued for execution.
 */
DEFINE_EVENT(block_rq, block_io_start,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);

/**
 * block_io_done - block IO operation request completed
 * @rq: block IO operation request
 *
 * Called when block operation request @rq is completed.
 */
DEFINE_EVENT(block_rq, block_io_done,

	TP_PROTO(struct request *rq),

	TP_ARGS(rq)
);

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned, nr_sector )
		__field( int, error )
		__array( char, rwbs, RWBS_LEN )
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio_sectors(bio);
		__entry->error = blk_status_to_errno(bio->bi_status);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
);

DECLARE_EVENT_CLASS(block_bio,

	TP_PROTO(struct bio *bio),

	TP_ARGS(bio),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__array( char, rwbs, RWBS_LEN )
		__array( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
);
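
/*
 * Note on the rwbs field (illustrative, not exhaustive):
 * blk_fill_rwbs() renders the operation and its flags as a short
 * string, one character per attribute, e.g.:
 *
 *	"R"	read
 *	"WS"	synchronous write
 *	"D"	discard
 *
 * so a consumer can decode a record's operation without knowing the
 * numeric op/flag encoding. See blk_fill_rwbs() for the full mapping.
 */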

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio. This
 * occurs when hardware limitations prevent a direct transfer of data
 * between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
DEFINE_EVENT(block_bio, block_bio_bounce,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request.
 */
DEFINE_EVENT(block_bio, block_bio_backmerge,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * request.
 */
DEFINE_EVENT(block_bio, block_bio_frontmerge,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_bio_queue - putting new block IO operation in queue
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into its request queue.
 */
DEFINE_EVENT(block_bio, block_bio_queue,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @bio: pending block IO operation (can be %NULL)
 *
 * A request struct has been allocated to handle the block IO operation @bio.
 */
DEFINE_EVENT(block_bio, block_getrq,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio)
);

/**
 * blk_zone_append_update_request_bio - update bio sector after zone append
 * @rq: the completed request that sets the bio sector
 *
 * Update the bio's bi_sector after a zone append command has been completed.
 */
DEFINE_EVENT(block_rq, blk_zone_append_update_request_bio,
	TP_PROTO(struct request *rq),
	TP_ARGS(rq)
);

/**
 * block_plug - keep operation requests in the request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s]", __entry->comm)
);

DECLARE_EVENT_CLASS(block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),

	TP_STRUCT__entry(
		__field( int, nr_rq )
		__array( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		__entry->nr_rq = depth;
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);
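
/*
 * Sketch (illustrative): plugging brackets request batching, so a
 * submitter typically sees one block_plug event followed later by a
 * block_unplug event carrying the accumulated depth:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... submit several bios ...
 *	blk_finish_plug(&plug);	<- flush; block_unplug fires with depth
 *
 * The tracepoints themselves are fired from block core as requests are
 * added to and flushed from the plug list, not by the caller directly.
 */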

/**
 * block_unplug - release of operation requests in the request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because the device driver is scheduled to
 * work on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug,

	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
);

/**
 * block_split - split a single bio struct into two bio structs
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio needs to be split into two bio requests. The
 * newly created bio starts at @new_sector. This split may be required
 * due to hardware limitations such as an operation crossing device
 * boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct bio *bio, unsigned int new_sector),

	TP_ARGS(bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( sector_t, new_sector )
		__array( char, rwbs, RWBS_LEN )
		__array( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->new_sector = new_sector;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
);

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @bio: revised operation
 * @dev: original device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
TRACE_EVENT(block_bio_remap,

	TP_PROTO(struct bio *bio, dev_t dev, sector_t from),

	TP_ARGS(bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( dev_t, old_dev )
		__field( sector_t, old_sector )
		__array( char, rwbs, RWBS_LEN )
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sector = bio_sectors(bio);
		__entry->old_dev = dev;
		__entry->old_sector = from;
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
);
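
/*
 * Illustrative example (device numbers made up): a bio submitted to a
 * device-mapper volume at 253,0 and remapped onto the underlying disk
 * 8,0 would produce a record such as:
 *
 *	block_bio_remap: 8,0 R 10240 + 8 <- (253,0) 2048
 *
 * i.e. the entry records the new (raw) device and sector, with the
 * original logical device and sector after the "<-".
 */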

/**
 * block_rq_remap - map request for a block operation request
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request *rq, dev_t dev, sector_t from),

	TP_ARGS(rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( dev_t, old_dev )
		__field( sector_t, old_sector )
		__field( unsigned int, nr_bios )
		__array( char, rwbs, RWBS_LEN )
	),

	TP_fast_assign(
		__entry->dev = disk_devt(rq->q->disk);
		__entry->sector = blk_rq_pos(rq);
		__entry->nr_sector = blk_rq_sectors(rq);
		__entry->old_dev = dev;
		__entry->old_sector = from;
		__entry->nr_bios = blk_rq_count_bios(rq);
		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
);

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bio: The block IO operation sent down to the device
 * @nr_sectors: The number of sectors affected by this operation
 *
 * Execute a zone management operation on a specified range of zones. This
 * range is encoded in @nr_sectors, which has to be a multiple of the zone
 * size.
 */
TRACE_EVENT(blkdev_zone_mgmt,

	TP_PROTO(struct bio *bio, sector_t nr_sectors),

	TP_ARGS(bio, nr_sectors),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( sector_t, nr_sectors )
		__array( char, rwbs, RWBS_LEN )
	),

	TP_fast_assign(
		__entry->dev = bio_dev(bio);
		__entry->sector = bio->bi_iter.bi_sector;
		__entry->nr_sectors = bio_sectors(bio);
		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
	),

	TP_printk("%d,%d %s %llu + %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sectors)
);

DECLARE_EVENT_CLASS(block_zwplug,

	TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
		 unsigned int nr_sectors),

	TP_ARGS(q, zno, sector, nr_sectors),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( unsigned int, zno )
		__field( sector_t, sector )
		__field( unsigned int, nr_sectors )
	),

	TP_fast_assign(
		__entry->dev = disk_devt(q->disk);
		__entry->zno = zno;
		__entry->sector = sector;
		__entry->nr_sectors = nr_sectors;
	),

	TP_printk("%d,%d zone %u, BIO %llu + %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->zno,
		  (unsigned long long)__entry->sector,
		  __entry->nr_sectors)
);

DEFINE_EVENT(block_zwplug, disk_zone_wplug_add_bio,

	TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
		 unsigned int nr_sectors),

	TP_ARGS(q, zno, sector, nr_sectors)
);

DEFINE_EVENT(block_zwplug, blk_zone_wplug_bio,

	TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
		 unsigned int nr_sectors),

	TP_ARGS(q, zno, sector, nr_sectors)
);

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
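
/*
 * Usage note (illustrative): exactly one .c file instantiates these
 * tracepoints by defining CREATE_TRACE_POINTS before the include, as
 * block/blk-core.c does:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/block.h>
 *
 * Every other includer only gets the inline trace_*() wrappers declared
 * by the TRACE_EVENT()/DEFINE_EVENT() macros above.
 */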