/drivers/block/null_blk/ |
D | zoned.c |
    215  unsigned int nr_sectors = len >> SECTOR_SHIFT;  in null_zone_valid_read_len() local
    219  sector + nr_sectors <= zone->wp)  in null_zone_valid_read_len()
    334  unsigned int nr_sectors, bool append)  in null_zone_write() argument
    346  return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);  in null_zone_write()
    388  if (zone->wp + nr_sectors > zone->start + zone->capacity) {  in null_zone_write()
    409  ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);  in null_zone_write()
    416  zone->wp += nr_sectors;  in null_zone_write()
    590  sector_t sector, sector_t nr_sectors)  in null_process_zoned_cmd() argument
    598  sts = null_zone_write(cmd, sector, nr_sectors, false);  in null_process_zoned_cmd()
    601  sts = null_zone_write(cmd, sector, nr_sectors, true);  in null_process_zoned_cmd()
    [all …]
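The zoned.c hits outline the bookkeeping on the zoned write path: a write is rejected if it would run past the zone's capacity (line 388) and otherwise advances the write pointer by nr_sectors (line 416). Below is a minimal, standalone C sketch of that pattern; the struct and function names are invented for the example and are not the null_blk definitions.

/*
 * Illustrative, userspace-only sketch of zoned write-pointer bookkeeping.
 * All identifiers are assumptions for the demo, not null_blk code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_zone {
	uint64_t start;     /* first sector of the zone */
	uint64_t capacity;  /* writable sectors in the zone */
	uint64_t wp;        /* current write pointer (absolute sector) */
};

/* Return true and advance the write pointer if the write fits in the zone. */
static bool demo_zone_write(struct demo_zone *zone, uint64_t sector,
			    uint64_t nr_sectors)
{
	if (sector != zone->wp)       /* zoned writes must be sequential */
		return false;
	if (zone->wp + nr_sectors > zone->start + zone->capacity)
		return false;         /* would overflow the zone */
	zone->wp += nr_sectors;
	return true;
}

int main(void)
{
	struct demo_zone z = { .start = 0, .capacity = 8, .wp = 0 };

	printf("write 4 sectors: %s\n", demo_zone_write(&z, 0, 4) ? "ok" : "rejected");
	printf("write 8 sectors: %s\n", demo_zone_write(&z, 4, 8) ? "ok" : "rejected");
	return 0;
}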
|
D | null_blk.h |
    99   unsigned int nr_sectors);
    109  sector_t nr_sectors);
    125  enum req_opf op, sector_t sector, sector_t nr_sectors)  in null_process_zoned_cmd() argument
|
D | main.c |
    1253  sector_t nr_sectors)  in null_handle_badblocks() argument
    1259  if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))  in null_handle_badblocks()
    1333  unsigned int nr_sectors)  in null_process_cmd() argument
    1339  ret = null_handle_badblocks(cmd, sector, nr_sectors);  in null_process_cmd()
    1351  sector_t nr_sectors, enum req_opf op)  in null_handle_cmd() argument
    1369  sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);  in null_handle_cmd()
    1371  sts = null_process_cmd(cmd, op, sector, nr_sectors);  in null_handle_cmd()
    1422  sector_t nr_sectors = bio_sectors(bio);  in null_submit_bio() local
    1430  null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));  in null_submit_bio()
    1476  sector_t nr_sectors = blk_rq_sectors(bd->rq);  in null_queue_rq() local
    [all …]
|
/drivers/md/bcache/ |
D | writeback.h |
    76   unsigned int nr_sectors)  in bcache_dev_stripe_dirty() argument
    87   if (nr_sectors <= dc->disk.stripe_size)  in bcache_dev_stripe_dirty()
    90   nr_sectors -= dc->disk.stripe_size;  in bcache_dev_stripe_dirty()
    145  uint64_t offset, int nr_sectors);
|
D | writeback.c |
    552  uint64_t offset, int nr_sectors)  in bcache_dev_sectors_dirty_add() argument
    566  atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);  in bcache_dev_sectors_dirty_add()
    570  while (nr_sectors) {  in bcache_dev_sectors_dirty_add()
    571  int s = min_t(unsigned int, abs(nr_sectors),  in bcache_dev_sectors_dirty_add()
    574  if (nr_sectors < 0)  in bcache_dev_sectors_dirty_add()
    587  nr_sectors -= s;  in bcache_dev_sectors_dirty_add()
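The writeback.c hits (lines 570-587) hint at a loop that spreads a signed dirty-sector delta across fixed-size stripes, one stripe at a time. A small userspace sketch of that splitting loop follows; the stripe size, plain array of counters, and function name are assumptions made for the demo, not bcache's data structures.

/*
 * Apply a signed nr_sectors delta to per-stripe dirty counters, clamping
 * each step to the end of the current stripe.  Demo values only.
 */
#include <stdio.h>
#include <stdlib.h>

#define STRIPE_SIZE  8   /* sectors per stripe, arbitrary for the demo */
#define NR_STRIPES   4

static long dirty[NR_STRIPES];

static void demo_sectors_dirty_add(unsigned int offset, int nr_sectors)
{
	unsigned int stripe = offset / STRIPE_SIZE;
	unsigned int stripe_offset = offset % STRIPE_SIZE;

	while (nr_sectors) {
		/* at most to the end of the current stripe */
		int s = STRIPE_SIZE - stripe_offset;

		if (s > abs(nr_sectors))
			s = abs(nr_sectors);
		if (nr_sectors < 0)
			s = -s;

		dirty[stripe] += s;
		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

int main(void)
{
	demo_sectors_dirty_add(5, 12);   /* mark 12 sectors dirty from sector 5 */
	demo_sectors_dirty_add(8, -3);   /* clean 3 sectors starting at sector 8 */

	for (int i = 0; i < NR_STRIPES; i++)
		printf("stripe %d: %ld dirty sectors\n", i, dirty[i]);
	return 0;
}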
|
/drivers/block/xen-blkback/ |
D | common.h |
    93   uint64_t nr_sectors;  member
    147  uint64_t nr_sectors;  member
    424  dst->u.discard.nr_sectors = src->u.discard.nr_sectors;  in blkif_get_x86_32_req()
    472  dst->u.discard.nr_sectors = src->u.discard.nr_sectors;  in blkif_get_x86_64_req()
|
D | blkback.c |
    979  preq.nr_sects = req->u.discard.nr_sectors;  in dispatch_discard_io()
    995  req->u.discard.nr_sectors,  in dispatch_discard_io()
|
/drivers/md/ |
D | dm-log-writes.c |
    97   __le64 nr_sectors;  member
    126  sector_t nr_sectors;  member
    340  entry.nr_sectors = cpu_to_le64(block->nr_sectors);  in log_one_block()
    473  lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);  in log_writes_kthread()
    726  block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));  in log_writes_map()
    937  block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;  in log_dax()
|
D | md.h |
    551  static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)  in md_sync_acct() argument
    553  atomic_add(nr_sectors, &bdev->bd_disk->sync_io);  in md_sync_acct()
    556  static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)  in md_sync_acct_bio() argument
    558  atomic_add(nr_sectors, &bio->bi_disk->sync_io);  in md_sync_acct_bio()
|
D | dm-zoned-target.c |
    632  unsigned int nr_sectors = bio_sectors(bio);  in dmz_map() local
    641  bio_op(bio), (unsigned long long)sector, nr_sectors,  in dmz_map()
    646  if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)  in dmz_map()
    650  if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))  in dmz_map()
    660  if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {  in dmz_map()
    670  if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))  in dmz_map()
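Line 650 of dmz_map() checks both the start sector and the length against a block mask, which is the usual power-of-two alignment test. The tiny sketch below illustrates the idea; the block size and names are arbitrary demo values, not the dm-zoned constants.

/* Power-of-two alignment check: "x & (size - 1)" is non-zero iff x is unaligned. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_BLOCK_SECTORS      8u                        /* 4 KiB blocks, 512-byte sectors */
#define DEMO_BLOCK_SECTORS_MASK (DEMO_BLOCK_SECTORS - 1)  /* valid only for powers of two */

/* A request is usable only if both its start and its length are block-aligned. */
static bool demo_block_aligned(uint64_t sector, unsigned int nr_sectors)
{
	return !((nr_sectors & DEMO_BLOCK_SECTORS_MASK) ||
		 (sector & DEMO_BLOCK_SECTORS_MASK));
}

int main(void)
{
	printf("sector 16, 8 sectors:  %s\n", demo_block_aligned(16, 8) ? "aligned" : "unaligned");
	printf("sector 16, 10 sectors: %s\n", demo_block_aligned(16, 10) ? "aligned" : "unaligned");
	return 0;
}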
|
D | raid10.c |
    2913  sector_t max_sector, nr_sectors;  in raid10_sync_request() local
    3420  nr_sectors = 0;  in raid10_sync_request()
    3439  nr_sectors += len>>9;  in raid10_sync_request()
    3442  r10_bio->sectors = nr_sectors;  in raid10_sync_request()
    3447  if (conf->cluster_sync_high < sector_nr + nr_sectors) {  in raid10_sync_request()
    3468  if (conf->cluster_sync_high < sect_va1 + nr_sectors) {  in raid10_sync_request()
    3496  r10_bio->sectors = nr_sectors;  in raid10_sync_request()
    3499  md_sync_acct_bio(bio, nr_sectors);  in raid10_sync_request()
    3511  return sectors_skipped + nr_sectors;  in raid10_sync_request()
    4428  int nr_sectors;  in reshape_request() local
    [all …]
|
D | raid1.c |
    2623  sector_t max_sector, nr_sectors;  in raid1_sync_request() local
    2839  nr_sectors = 0;  in raid1_sync_request()
    2873  nr_sectors += len>>9;  in raid1_sync_request()
    2878  r1_bio->sectors = nr_sectors;  in raid1_sync_request()
    2881  conf->cluster_sync_high < sector_nr + nr_sectors) {  in raid1_sync_request()
    2899  md_sync_acct_bio(bio, nr_sectors);  in raid1_sync_request()
    2908  md_sync_acct_bio(bio, nr_sectors);  in raid1_sync_request()
    2913  return nr_sectors;  in raid1_sync_request()
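In both raid10_sync_request() and raid1_sync_request(), nr_sectors starts at zero and grows by len>>9 as pieces are added to the resync request, i.e. each byte length is converted to 512-byte sectors. A trivial standalone illustration of that accumulation, with made-up segment sizes:

/* Accumulate a sector count from byte-sized segments (512-byte sectors). */
#include <stdio.h>

int main(void)
{
	const unsigned int lengths[] = { 4096, 4096, 2048 };  /* bytes added per step */
	unsigned long nr_sectors = 0;

	for (unsigned int i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
		nr_sectors += lengths[i] >> 9;   /* bytes -> 512-byte sectors */

	printf("request covers %lu sectors\n", nr_sectors);   /* 8 + 8 + 4 = 20 */
	return 0;
}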
|
/drivers/block/drbd/ |
D | drbd_actlog.c |
    852  sector_t esector, nr_sectors;  in __drbd_change_sync() local
    868  nr_sectors = get_capacity(device->vdisk);  in __drbd_change_sync()
    871  if (!expect(sector < nr_sectors))  in __drbd_change_sync()
    873  if (!expect(esector < nr_sectors))  in __drbd_change_sync()
    874  esector = nr_sectors - 1;  in __drbd_change_sync()
    876  lbnr = BM_SECT_TO_BIT(nr_sectors-1);  in __drbd_change_sync()
    883  if (unlikely(esector == (nr_sectors-1)))  in __drbd_change_sync()
|
D | drbd_req.c |
    885  sector_t esector, nr_sectors;  in drbd_may_do_local_read() local
    892  nr_sectors = get_capacity(device->vdisk);  in drbd_may_do_local_read()
    893  D_ASSERT(device, sector < nr_sectors);  in drbd_may_do_local_read()
    894  D_ASSERT(device, esector < nr_sectors);  in drbd_may_do_local_read()
|
D | drbd_receiver.c |
    1512  …discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, int flags)  in drbd_issue_discard_or_zero_out() argument
    1533  if (nr_sectors < granularity)  in drbd_issue_discard_or_zero_out()
    1538  if (nr_sectors < 2*granularity)  in drbd_issue_discard_or_zero_out()
    1548  nr_sectors -= nr;  in drbd_issue_discard_or_zero_out()
    1551  while (nr_sectors >= max_discard_sectors) {  in drbd_issue_discard_or_zero_out()
    1553  nr_sectors -= max_discard_sectors;  in drbd_issue_discard_or_zero_out()
    1556  if (nr_sectors) {  in drbd_issue_discard_or_zero_out()
    1561  nr = nr_sectors;  in drbd_issue_discard_or_zero_out()
    1565  nr_sectors -= nr;  in drbd_issue_discard_or_zero_out()
    1570  if (nr_sectors) {  in drbd_issue_discard_or_zero_out()
    [all …]
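The drbd_receiver.c hits (lines 1551-1565) outline a familiar chunking pattern: issue full max_discard_sectors-sized requests while the range is large enough, then send the remainder as one smaller request. Here is a hedged, userspace-only sketch of that loop; issue_discard() and the per-request limit are placeholders for the example, not the DRBD interfaces.

/* Split a large sector range into per-request-limit sized chunks. */
#include <stdint.h>
#include <stdio.h>

#define MAX_DISCARD_SECTORS 16u   /* per-request limit, arbitrary here */

static void issue_discard(uint64_t start, unsigned int nr)
{
	printf("discard %u sectors at %llu\n", nr, (unsigned long long)start);
}

static void demo_discard_range(uint64_t start, unsigned int nr_sectors)
{
	/* full-sized chunks first */
	while (nr_sectors >= MAX_DISCARD_SECTORS) {
		issue_discard(start, MAX_DISCARD_SECTORS);
		start += MAX_DISCARD_SECTORS;
		nr_sectors -= MAX_DISCARD_SECTORS;
	}
	/* whatever is left over goes out as one final, smaller request */
	if (nr_sectors)
		issue_discard(start, nr_sectors);
}

int main(void)
{
	demo_discard_range(100, 40);   /* 16 + 16 + 8 */
	return 0;
}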
|
D | drbd_int.h |
    1547  sector_t start, unsigned int nr_sectors, int flags);
|
/drivers/block/ |
D | floppy.c |
    2258  unsigned int nr_sectors = current_count_sectors;  in floppy_end_request() local
    2263  nr_sectors = blk_rq_cur_sectors(req);  in floppy_end_request()
    2264  if (blk_update_request(req, error, nr_sectors << 9))  in floppy_end_request()
    2320  int nr_sectors;  in rw_interrupt() local
    2332  nr_sectors = 0;  in rw_interrupt()
    2345  nr_sectors = (((reply_buffer[R_TRACK] - raw_cmd->cmd[TRACK]) * heads +  in rw_interrupt()
    2349  if (nr_sectors / ssize >  in rw_interrupt()
    2352  nr_sectors, current_count_sectors);  in rw_interrupt()
    2365  nr_sectors -= in_sector_offset;  in rw_interrupt()
    2366  INFBOUND(nr_sectors, 0);  in rw_interrupt()
    [all …]
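The rw_interrupt() hit on line 2345 suggests the floppy driver recovers the number of sectors transferred from the difference between the controller-reported ending track/head/sector position and the starting one. The sketch below shows that kind of CHS-delta arithmetic in a generic, simplified form; the formula and geometry are assumptions for the demo, not the floppy.c computation.

/* Generic CHS-delta arithmetic: sectors between two positions. */
#include <stdio.h>

struct chs { int track, head, sector; };

/* sectors between two CHS positions on a disk with the given geometry */
static int chs_delta_sectors(struct chs start, struct chs end,
			     int heads, int sectors_per_track)
{
	return ((end.track - start.track) * heads +
		(end.head - start.head)) * sectors_per_track +
	       (end.sector - start.sector);
}

int main(void)
{
	struct chs start = { .track = 0, .head = 0, .sector = 1 };
	struct chs end   = { .track = 1, .head = 0, .sector = 1 };

	/* 2 heads, 18 sectors per track: one full cylinder is 36 sectors */
	printf("transferred %d sectors\n",
	       chs_delta_sectors(start, end, 2, 18));
	return 0;
}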
|
D | nbd.c |
    302  sector_t nr_sectors = config->bytesize >> 9;  in nbd_size_update() local
    311  set_capacity(nbd->disk, nr_sectors);  in nbd_size_update()
    314  bd_set_nr_sectors(bdev, nr_sectors);  in nbd_size_update()
|
D | xen-blkfront.c |
    575  ring_req->u.discard.nr_sectors = blk_rq_sectors(req);  in blkif_queue_discard_req()
|