/drivers/md/bcache/
writeback.h
      46  unsigned nr_sectors) in bcache_dev_stripe_dirty() argument
      54  if (nr_sectors <= dc->disk.stripe_size) in bcache_dev_stripe_dirty()
      57  nr_sectors -= dc->disk.stripe_size; in bcache_dev_stripe_dirty()
writeback.c
     286  uint64_t offset, int nr_sectors) in bcache_dev_sectors_dirty_add() argument
     297  while (nr_sectors) { in bcache_dev_sectors_dirty_add()
     298  int s = min_t(unsigned, abs(nr_sectors), in bcache_dev_sectors_dirty_add()
     301  if (nr_sectors < 0) in bcache_dev_sectors_dirty_add()
     314  nr_sectors -= s; in bcache_dev_sectors_dirty_add()
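The two bcache hits are one mechanism: dirty sectors are tracked per stripe, and bcache_dev_sectors_dirty_add() walks the stripes applying a signed delta, clamping each step to what is left of the current stripe (which is why nr_sectors is an int and the code takes abs() of it). A minimal user-space sketch of that pattern, with illustrative stripe geometry and a plain dirty[] array standing in for the kernel's per-device counters:

    /* Per-stripe dirty-sector accounting: apply a signed sector delta
     * stripe by stripe, in the style of bcache_dev_sectors_dirty_add().
     * The geometry and the dirty[] array are assumptions for illustration. */
    #include <stdio.h>
    #include <stdlib.h>

    #define NR_STRIPES   8
    #define STRIPE_SIZE  1024                 /* sectors per stripe (assumed) */

    static long dirty[NR_STRIPES];            /* dirty sectors per stripe */

    static void sectors_dirty_add(unsigned long long offset, int nr_sectors)
    {
        unsigned stripe = offset / STRIPE_SIZE;
        unsigned stripe_offset = offset % STRIPE_SIZE;

        while (nr_sectors) {
            /* never step past the end of the current stripe */
            int s = STRIPE_SIZE - stripe_offset;
            if (s > abs(nr_sectors))
                s = abs(nr_sectors);
            if (nr_sectors < 0)
                s = -s;                       /* negative delta: went clean */

            dirty[stripe] += s;
            nr_sectors -= s;
            stripe_offset = 0;                /* later stripes start at 0 */
            stripe++;
        }
    }

    int main(void)
    {
        sectors_dirty_add(1000, 100);         /* spans stripes 0 and 1 */
        sectors_dirty_add(1000, -24);         /* stripe 0 goes clean again */
        for (int i = 0; i < 2; i++)
            printf("stripe %d: %ld dirty\n", i, dirty[i]);
        return 0;
    }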
/drivers/block/xen-blkback/
common.h
      94  uint64_t nr_sectors; member
     148  uint64_t nr_sectors; member
     429  dst->u.discard.nr_sectors = src->u.discard.nr_sectors; in blkif_get_x86_32_req()
     477  dst->u.discard.nr_sectors = src->u.discard.nr_sectors; in blkif_get_x86_64_req()
blkback.c
    1014  preq.nr_sects = req->u.discard.nr_sectors; in dispatch_discard_io()
    1030  req->u.discard.nr_sectors, in dispatch_discard_io()
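The common.h hits show the same discard request being translated field by field between the 32-bit and 64-bit guest ABIs; blkback.c then feeds nr_sectors into the actual discard. A sketch of the translation pattern under simplified stand-in layouts (this is not the real blkif ABI):

    /* ABI translation as in blkif_get_x86_32_req()/blkif_get_x86_64_req():
     * copy the guest's discard request field by field into the backend's
     * native layout. Struct definitions here are illustrative only. */
    #include <stdint.h>
    #include <stdio.h>

    struct guest_discard_req {                /* guest-side layout (assumed) */
        uint8_t  operation;
        uint64_t sector_number;
        uint64_t nr_sectors;
    } __attribute__((packed));

    struct native_discard_req {               /* backend's native layout */
        uint8_t  operation;
        uint64_t sector_number;
        uint64_t nr_sectors;
    };

    static void get_discard_req(struct native_discard_req *dst,
                                const struct guest_discard_req *src)
    {
        dst->operation     = src->operation;
        dst->sector_number = src->sector_number;
        dst->nr_sectors    = src->nr_sectors;
    }

    int main(void)
    {
        struct guest_discard_req g = { 1, 2048, 512 };  /* opcode illustrative */
        struct native_discard_req n;

        get_discard_req(&n, &g);
        printf("discard %llu sectors at %llu\n",
               (unsigned long long)n.nr_sectors,
               (unsigned long long)n.sector_number);
        return 0;
    }

Because the two layouts can differ in padding, a memcpy() of the whole request would be wrong; the field-by-field copy is the point. (The xen-blkfront.c hit at the end of this listing is the frontend filling in the same nr_sectors field from blk_rq_sectors().)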
/drivers/md/
dm-log-writes.c
      94  __le64 nr_sectors; member
     123  sector_t nr_sectors; member
     269  entry.nr_sectors = cpu_to_le64(block->nr_sectors); in log_one_block()
     389  lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors); in log_writes_kthread()
     639  block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio)); in log_writes_map()
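dm-log-writes keeps the count twice: a native-order sector_t in the in-core block and a __le64 in the on-disk entry, converted with cpu_to_le64() when the block is logged. A user-space sketch of that fixed-endianness-on-disk pattern, with glibc's htole64()/le64toh() standing in for the kernel's cpu_to_le64()/le64_to_cpu():

    /* On-disk fields keep a fixed byte order; in-core fields stay native. */
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct log_write_entry {                  /* on-disk: always little-endian */
        uint64_t nr_sectors_le;
    };

    struct pending_block {                    /* in-core: native byte order */
        uint64_t nr_sectors;
    };

    int main(void)
    {
        struct pending_block block = { .nr_sectors = 8 };
        struct log_write_entry entry;

        entry.nr_sectors_le = htole64(block.nr_sectors);   /* write-out step */
        printf("logged %llu sectors\n",
               (unsigned long long)le64toh(entry.nr_sectors_le));
        return 0;
    }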
md.h
     519  static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) in md_sync_acct() argument
     521  atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); in md_sync_acct()
     524  static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) in md_sync_acct_bio() argument
     526  atomic_add(nr_sectors, &bio->bi_disk->sync_io); in md_sync_acct_bio()
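Both inline helpers reduce to an atomic add of the sector count into the disk's sync_io counter, so resync traffic can be accounted separately from normal I/O. A sketch with C11 atomics standing in for the kernel's atomic_t:

    /* Resync accounting: atomically accumulate sectors into sync_io. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct gendisk { atomic_long sync_io; };

    static inline void md_sync_acct(struct gendisk *disk, unsigned long nr_sectors)
    {
        atomic_fetch_add(&disk->sync_io, nr_sectors);
    }

    int main(void)
    {
        struct gendisk disk = { .sync_io = 0 };

        md_sync_acct(&disk, 128);
        md_sync_acct(&disk, 64);
        printf("sync_io = %ld sectors\n", atomic_load(&disk.sync_io));
        return 0;
    }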
dm-zoned-target.c
     623  unsigned int nr_sectors = bio_sectors(bio); in dmz_map() local
     631  bio_op(bio), (unsigned long long)sector, nr_sectors, in dmz_map()
     638  if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE) in dmz_map()
     642  if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK)) in dmz_map()
     652  if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) { in dmz_map()
     662  if (chunk_sector + nr_sectors > dev->zone_nr_sectors) in dmz_map()
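The dmz_map() hits are input validation: a zero-length bio only makes sense as a write (a flush), the start sector and length must both be aligned to the 4KB block size, and the request must not run past the end of its zone. A sketch of the alignment and boundary checks (constants are illustrative, not dm-zoned's):

    /* Validate a request against block alignment and the zone boundary,
     * in the style of dmz_map(). Sizes here are assumptions. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SECTORS_MASK  7u            /* 4KB block = 8 x 512B sectors */
    #define ZONE_NR_SECTORS     524288u       /* 256MB zone (assumed) */

    static bool bio_is_valid(uint64_t sector, unsigned nr_sectors)
    {
        /* start and length must both be block aligned */
        if ((sector & BLOCK_SECTORS_MASK) || (nr_sectors & BLOCK_SECTORS_MASK))
            return false;

        /* the request must not cross its zone's end */
        uint64_t chunk_sector = sector % ZONE_NR_SECTORS;
        return chunk_sector + nr_sectors <= ZONE_NR_SECTORS;
    }

    int main(void)
    {
        printf("%d\n", bio_is_valid(0, 8));          /* 1: aligned, in zone */
        printf("%d\n", bio_is_valid(3, 8));          /* 0: misaligned start */
        printf("%d\n", bio_is_valid(524280, 16));    /* 0: crosses zone end */
        return 0;
    }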
raid1.c
    2608  sector_t max_sector, nr_sectors; in raid1_sync_request() local
    2821  nr_sectors = 0; in raid1_sync_request()
    2855  nr_sectors += len>>9; in raid1_sync_request()
    2860  r1_bio->sectors = nr_sectors; in raid1_sync_request()
    2863  conf->cluster_sync_high < sector_nr + nr_sectors) { in raid1_sync_request()
    2881  md_sync_acct_bio(bio, nr_sectors); in raid1_sync_request()
    2890  md_sync_acct_bio(bio, nr_sectors); in raid1_sync_request()
    2896  return nr_sectors; in raid1_sync_request()
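raid1_sync_request() (and its raid10 twin below, including reshape_request()) builds the resync bios one page at a time: each page grows nr_sectors by len >> 9, and the total becomes r1_bio->sectors, the argument to md_sync_acct_bio(), and the return value. A sketch of the accumulation loop (the amount left to sync is an assumed value):

    /* Accumulate a resync request page by page, counting 512B sectors. */
    #include <stdio.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
        unsigned long long nr_sectors = 0;
        unsigned long long sync_blocks = 1000;   /* sectors left to resync */

        while (sync_blocks) {
            unsigned long long len = sync_blocks << 9;  /* sectors -> bytes */
            if (len > PAGE_SIZE)
                len = PAGE_SIZE;
            /* ... bio_add_page() on each mirror's bio would go here ... */
            nr_sectors += len >> 9;                     /* bytes -> sectors */
            sync_blocks -= len >> 9;
        }

        printf("r1_bio->sectors = %llu\n", nr_sectors);
        return 0;
    }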
raid10.c
    2877  sector_t max_sector, nr_sectors; in raid10_sync_request() local
    3375  nr_sectors = 0; in raid10_sync_request()
    3394  nr_sectors += len>>9; in raid10_sync_request()
    3397  r10_bio->sectors = nr_sectors; in raid10_sync_request()
    3405  r10_bio->sectors = nr_sectors; in raid10_sync_request()
    3408  md_sync_acct_bio(bio, nr_sectors); in raid10_sync_request()
    3420  return sectors_skipped + nr_sectors; in raid10_sync_request()
    4305  int nr_sectors; in reshape_request() local
    4475  nr_sectors = 0; in reshape_request()
    4490  nr_sectors += len >> 9; in reshape_request()
    [all …]
/drivers/block/drbd/
drbd_actlog.c
     864  sector_t esector, nr_sectors; in __drbd_change_sync() local
     880  nr_sectors = drbd_get_capacity(device->this_bdev); in __drbd_change_sync()
     883  if (!expect(sector < nr_sectors)) in __drbd_change_sync()
     885  if (!expect(esector < nr_sectors)) in __drbd_change_sync()
     886  esector = nr_sectors - 1; in __drbd_change_sync()
     888  lbnr = BM_SECT_TO_BIT(nr_sectors-1); in __drbd_change_sync()
     895  if (unlikely(esector == (nr_sectors-1))) in __drbd_change_sync()
drbd_req.c
     914  sector_t esector, nr_sectors; in drbd_may_do_local_read() local
     921  nr_sectors = drbd_get_capacity(device->this_bdev); in drbd_may_do_local_read()
     922  D_ASSERT(device, sector < nr_sectors); in drbd_may_do_local_read()
     923  D_ASSERT(device, esector < nr_sectors); in drbd_may_do_local_read()
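Both drbd files derive nr_sectors from the device capacity and bound the request against it: __drbd_change_sync() clamps an esector that runs past the end back to nr_sectors - 1, while drbd_may_do_local_read() merely asserts that the range is in bounds. A sketch of the clamp (capacity and request values are assumed):

    /* Bound a request's last sector (esector) against device capacity. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t nr_sectors = 1ULL << 20;     /* capacity in sectors (assumed) */
        uint64_t sector = nr_sectors - 4;     /* request start */
        unsigned size = 4096;                 /* request length in bytes */
        uint64_t esector = sector + (size >> 9) - 1;   /* last sector touched */

        if (sector >= nr_sectors)
            return 1;                         /* entirely past the end: reject */
        if (esector >= nr_sectors)
            esector = nr_sectors - 1;         /* clamp a partial overrun */

        printf("esector clamped to %llu (capacity %llu)\n",
               (unsigned long long)esector, (unsigned long long)nr_sectors);
        return 0;
    }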
/drivers/scsi/
sd.c
     765  u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_unmap_cmnd() local
     785  put_unaligned_be32(nr_sectors, &buf[16]); in sd_setup_unmap_cmnd()
     800  u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_write_same16_cmnd() local
     816  put_unaligned_be32(nr_sectors, &cmd->cmnd[10]); in sd_setup_write_same16_cmnd()
     831  u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_write_same10_cmnd() local
     847  put_unaligned_be16(nr_sectors, &cmd->cmnd[7]); in sd_setup_write_same10_cmnd()
     863  u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); in sd_setup_write_zeroes_cmnd() local
     880  if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) in sd_setup_write_zeroes_cmnd()
     948  unsigned int nr_sectors = blk_rq_sectors(rq); in sd_setup_write_same_cmnd() local
     964  nr_sectors >>= ilog2(sdp->sector_size) - 9; in sd_setup_write_same_cmnd()
    [all …]
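Every sd.c hit repeats one conversion: blk_rq_sectors() counts 512-byte block-layer sectors, but UNMAP and WRITE SAME commands take counts of the device's logical blocks, hence the right shift by ilog2(sector_size) - 9. For a 4096-byte device that shift is 3, i.e. divide by 8. A worked sketch (ilog2() reimplemented for user space):

    /* Convert 512B block-layer sectors to device logical blocks. */
    #include <stdint.h>
    #include <stdio.h>

    static unsigned ilog2(unsigned v)         /* floor(log2(v)), v > 0 */
    {
        unsigned r = 0;
        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned sector_size = 4096;          /* device logical block size */
        uint32_t rq_sectors = 2048;           /* request length in 512B sectors */
        uint32_t nr_blocks = rq_sectors >> (ilog2(sector_size) - 9);

        /* 2048 x 512B = 1MB = 256 x 4096B */
        printf("%u sectors -> %u blocks of %uB\n",
               rq_sectors, nr_blocks, sector_size);
        return 0;
    }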
/drivers/block/
floppy.c
    2214  unsigned int nr_sectors = current_count_sectors; in floppy_end_request() local
    2219  nr_sectors = blk_rq_cur_sectors(req); in floppy_end_request()
    2220  if (__blk_end_request(req, error, nr_sectors << 9)) in floppy_end_request()
    2284  int nr_sectors; in rw_interrupt() local
    2296  nr_sectors = 0; in rw_interrupt()
    2309  nr_sectors = (((R_TRACK - TRACK) * heads + in rw_interrupt()
    2313  if (nr_sectors / ssize > in rw_interrupt()
    2316  nr_sectors, current_count_sectors); in rw_interrupt()
    2326  nr_sectors -= in_sector_offset; in rw_interrupt()
    2327  INFBOUND(nr_sectors, 0); in rw_interrupt()
    [all …]
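floppy_end_request() turns sectors back into bytes with nr_sectors << 9; rw_interrupt() goes the other way, rebuilding a linear sector count from the cylinder/head/sector position the FDC reports back (the listing truncates that expression). A rough sketch of the geometry arithmetic; the real driver also scales by sector size and clamps with INFBOUND(), and the 1.44MB geometry here is an assumption:

    /* Reconstruct a linear sector count from a CHS position. */
    #include <stdio.h>

    int main(void)
    {
        int heads = 2, sect_per_track = 18;   /* 1.44MB floppy geometry */
        int TRACK = 0, HEAD = 0, SECTOR = 1;  /* where the transfer began */
        int R_TRACK = 1, R_HEAD = 0, R_SECTOR = 5;  /* where the FDC stopped */

        /* tracks and heads crossed, then sectors into the final track */
        int nr_sectors = ((R_TRACK - TRACK) * heads + (R_HEAD - HEAD))
                             * sect_per_track
                         + (R_SECTOR - SECTOR);

        printf("transferred %d sectors\n", nr_sectors);   /* 40 */
        return 0;
    }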
xen-blkfront.c
     555  ring_req->u.discard.nr_sectors = blk_rq_sectors(req); in blkif_queue_discard_req()