/drivers/md/bcache/
request.c
    126  bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);  in bch_data_invalidate()
    128  while (bio_sectors(bio)) {  in bch_data_invalidate()
    129  unsigned int sectors = min(bio_sectors(bio),  in bch_data_invalidate()
    207  if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)  in bch_data_insert_start()
    234  if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),  in bch_data_insert_start()
    403  bio_sectors(bio) & (c->sb.block_size - 1)) {  in check_should_bypass()
    459  bch_rescale_priorities(c, bio_sectors(bio));  in check_should_bypass()
    462  bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));  in check_should_bypass()
    529  unsigned int bio_sectors = bio_sectors(bio);  in cache_lookup_fn() (local)
    540  BUG_ON(bio_sectors <= sectors);  in cache_lookup_fn()
    [all …]

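Every hit in this listing is a use of the bio_sectors() helper, which reports the payload remaining in a bio, in 512-byte sectors. A sketch of the definition as it stands in kernels of this vintage (the real macros live in include/linux/bio.h and include/linux/bvec.h):

    /* Remaining payload of the bio, expressed in 512-byte sectors. */
    #define bvec_iter_sectors(iter)  ((iter).bi_size >> 9)
    #define bio_sectors(bio)         bvec_iter_sectors((bio)->bi_iter)

Because it reads the live iterator, the value shrinks as a bio is advanced, which is why bch_data_invalidate() above can loop on while (bio_sectors(bio)) as it consumes the bio a few sectors at a time.
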
writeback.h
    79   bio_sectors(bio)))  in should_writeback()

/drivers/lightnvm/
pblk-cache.c
    32   generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),  in pblk_write_to_cache()

pblk-read.c
    275  generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),  in pblk_submit_read()

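Both pblk hits show the usual bio-based accounting pattern: sample bio_sectors(bio) before the bio is consumed and charge it to the disk statistics. A minimal sketch, assuming the pre-5.7 generic_start_io_acct()/generic_end_io_acct() interface (surrounding driver code hypothetical):

    /* Hypothetical bio-based driver: account a bio against part0. */
    unsigned long start_time = jiffies;

    generic_start_io_acct(q, bio_op(bio), bio_sectors(bio),
                          &bio->bi_disk->part0);
    /* ... perform the I/O ... */
    generic_end_io_acct(q, bio_op(bio), &bio->bi_disk->part0, start_time);

Sampling the sector count up front matters: once the driver advances bi_iter, bio_sectors() no longer reflects the original request size.
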
/drivers/md/
raid0.c
    469  + bio_sectors(bio));  in is_io_in_chunk_boundary()
    473  + bio_sectors(bio));  in is_io_in_chunk_boundary()
    599  if (sectors < bio_sectors(bio)) {  in raid0_make_request()

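The two identical raid0 hits are the tail of the classic chunk-boundary test: an I/O stays within one chunk iff its offset inside the chunk plus its length does not exceed the chunk size. A sketch of the power-of-two branch (the real function also handles non-power-of-two chunk sizes):

    /* True if the bio does not cross a chunk boundary; (chunk_sects - 1)
     * masks the position down to the offset within the chunk. */
    static inline bool io_fits_in_chunk(unsigned int chunk_sects, struct bio *bio)
    {
            return chunk_sects >=
                   (bio->bi_iter.bi_sector & (chunk_sects - 1)) + bio_sectors(bio);
    }

When the test fails, raid0_make_request() splits the bio at the boundary, which is the sectors < bio_sectors(bio) comparison at line 599.
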
dm-log-writes.c
    692  if (!bio_sectors(bio) && !flush_bio)  in log_writes_map()
    726  block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));  in log_writes_map()
    738  if (flush_bio && !bio_sectors(bio)) {  in log_writes_map()

dm-zoned.h
    46   #define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))

dm-linear.c
    94   if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)  in linear_map_bio()

dm.c
    686   generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),  in start_io_acct()
    691   bio->bi_iter.bi_sector, bio_sectors(bio),  in start_io_acct()
    706   bio->bi_iter.bi_sector, bio_sectors(bio),  in end_io_acct()
    1617  ci.sector_count = bio_sectors(bio);  in __split_and_process_bio()
    1629  struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,  in __split_and_process_bio()
    1689  ci.sector_count = bio_sectors(bio);  in __process_bio()
    1706  sector_count = bio_sectors(*bio);  in dm_queue_split()

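The dm.c hit at line 1629 is one instance of the standard split-and-chain idiom used whenever a bio is larger than what a driver can handle in one go (see also raid1.c line 1293 and raid10.c line 1477 below). A generic sketch, with my_bioset hypothetical:

    /* Handle only 'sectors' now; requeue the remainder. */
    if (sectors < bio_sectors(bio)) {
            struct bio *split = bio_split(bio, sectors, GFP_NOIO, &my_bioset);

            bio_chain(split, bio);        /* original completes after both halves */
            generic_make_request(bio);    /* resubmit the tail */
            bio = split;                  /* keep processing the head */
    }

bio_split() carves the first 'sectors' sectors into a new bio and leaves the remainder in the original, so the driver can process a bounded piece and let the block layer re-dispatch the rest.
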
dm-crypt.c
    819   if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)  in dm_crypt_integrity_io_alloc()
    826   tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);  in dm_crypt_integrity_io_alloc()
    1623  sector += bio_sectors(clone);  in kcryptd_crypt_write_convert()
    2755  if (bio_sectors(bio))  in crypt_map()
    2782  unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);  in crypt_map()
    2787  if (bio_sectors(bio) > cc->tag_pool_max_sectors)  in crypt_map()

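The tag_len computations at lines 826 and 2782 size the integrity metadata for a bio: bio_sectors(bio) >> cc->sector_shift converts 512-byte sectors into dm-crypt sectors (which may be larger, e.g. 4 KiB), and each crypt sector carries on_disk_tag_size bytes of tags. A worked example with assumed values:

    /* Hypothetical: 4 KiB crypt sectors (sector_shift = 3), 4-byte tags. */
    unsigned tag_len = on_disk_tag_size * (bio_sectors(bio) >> sector_shift);
    /* 64 KiB bio  -> bio_sectors(bio) = 128
     * 128 >> 3    =  16 crypt sectors
     * tag_len     =  4 * 16 = 64 bytes of tag space for this bio */
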
dm-default-key.c
    266  if (bio_sectors(bio) == 0)  in default_key_map()

dm-delay.c
    298  if (bio_sectors(bio))  in delay_map()

dm-flakey.c
    283  if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)  in flakey_map_bio()

raid1.c
    1193  r1_bio->sectors = bio_sectors(bio);  in init_r1bio()
    1293  if (max_sectors < bio_sectors(bio)) {  in raid1_read_request()
    1464  if (max_sectors < bio_sectors(bio)) {  in raid1_write_request()
    1582  bio->bi_iter.bi_sector, bio_sectors(bio));  in raid1_make_request()
    2228  md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));  in sync_request_write()

raid10.c
    1192  if (max_sectors < bio_sectors(bio)) {  in raid10_read_request()
    1477  if (r10_bio->sectors < bio_sectors(bio)) {  in raid10_write_request()
    1526  int sectors = bio_sectors(bio);  in raid10_make_request()
    2096  md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));  in sync_request_write()
    2120  bio_sectors(tbio));  in sync_request_write()
    2251  md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));  in recovery_request_write()
    2257  bio_sectors(wbio2));  in recovery_request_write()

dm-integrity.c
    1433  if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {  in dec_in_flight()
    1644  if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {  in dm_integrity_map()
    1646  (unsigned long long)dio->range.logical_sector, bio_sectors(bio),  in dm_integrity_map()
    1650  if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1…  in dm_integrity_map()
    1653  (unsigned long long)dio->range.logical_sector, bio_sectors(bio));  in dm_integrity_map()
    1672  unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;  in dm_integrity_map()
    1875  dio->range.n_sectors = bio_sectors(bio);  in dm_integrity_map_continue()

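The check at line 1650 folds two alignment tests into one: when sectors_per_block is a power of two, OR-ing the start sector with the length and masking with (sectors_per_block - 1) is non-zero iff either value is misaligned. Spelled out as a sketch:

    /* Assuming spb (sectors per block) is a power of two. */
    static inline bool misaligned(sector_t start, unsigned int len, unsigned int spb)
    {
            return ((start | len) & (spb - 1)) != 0;
            /* equivalent to: (start % spb) || (len % spb) */
    }

dm-verity-target.c line 645 below uses the same trick against the verity data block size.
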
dm-zoned-target.c
    624  unsigned int nr_sectors = bio_sectors(bio);  in dmz_map()

dm-verity-target.c
    645  if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &  in verity_map()

/drivers/ide/
ide-cd.c
    150  unsigned long bio_sectors;  in cdrom_analyze_sense_data() (local)
    191  bio_sectors = max(bio_sectors(failed_command->bio), 4U);  in cdrom_analyze_sense_data()
    192  sector &= ~(bio_sectors - 1);  in cdrom_analyze_sense_data()

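Lines 191-192 round the failing sector down to a transfer-aligned boundary before reporting it: the transfer size is clamped to at least 4 sectors (2 KiB, one CD-ROM frame), and the mask then clears the low bits. The rounding is only exact when the size is a power of two, which frame-aligned CD reads are. For example:

    /* bio_sectors = 8: mask is ~7, so sector 1234 -> 1232 (a multiple of 8) */
    sector &= ~(bio_sectors - 1);
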
/drivers/block/rsxx/
dev.c
    101  generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),  in disk_stats_start()

/drivers/nvdimm/
nd.h
    408  generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),  in nd_iostat_start()

/drivers/block/
ps3disk.c
    94   __func__, __LINE__, i, bio_sectors(iter.bio),  in ps3disk_scatter_gather()

null_blk_main.c
    1124  bio_sectors(bio) << SECTOR_SHIFT);  in null_handle_bio()
    1310  sector_t nr_sectors = bio_sectors(bio);  in null_queue_bio()

/drivers/scsi/
sr.c
    353  bio_sectors(SCpnt->request->bio);  in sr_done()

/drivers/target/
target_core_iblock.c
    648  bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));  in iblock_alloc_bip()

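The last hit sizes a bio integrity payload. bio_integrity_bytes() converts a data sector count into the number of protection-information bytes that must travel with it; a sketch of its shape (from memory, not verbatim include/linux/bio.h):

    /* Bytes of integrity metadata needed to cover 'sectors' of data:
     * one tuple of tuple_size bytes per protection interval, where
     * interval_exp is log2 of the interval size in bytes. */
    static inline unsigned int integrity_bytes(struct blk_integrity *bi,
                                               unsigned int sectors)
    {
            return (sectors >> (bi->interval_exp - 9)) * bi->tuple_size;
    }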