
Searched refs:nr_sectors (Results 1 – 15 of 15) sorted by relevance

/drivers/md/bcache/
writeback.h 45 unsigned nr_sectors) in bcache_dev_stripe_dirty() argument
53 if (nr_sectors <= dc->disk.stripe_size) in bcache_dev_stripe_dirty()
56 nr_sectors -= dc->disk.stripe_size; in bcache_dev_stripe_dirty()
writeback.c 286 uint64_t offset, int nr_sectors) in bcache_dev_sectors_dirty_add() argument
297 while (nr_sectors) { in bcache_dev_sectors_dirty_add()
298 int s = min_t(unsigned, abs(nr_sectors), in bcache_dev_sectors_dirty_add()
301 if (nr_sectors < 0) in bcache_dev_sectors_dirty_add()
314 nr_sectors -= s; in bcache_dev_sectors_dirty_add()
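The bcache hits above account dirty data stripe by stripe: the loop in bcache_dev_sectors_dirty_add() splits [offset, offset + nr_sectors) on stripe boundaries, and a negative nr_sectors removes dirty sectors. A minimal user-space sketch of that loop, with a fixed STRIPE_SIZE and a plain array standing in for the per-stripe counters (both illustrative, not the kernel's data structures):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define STRIPE_SIZE 8           /* sectors per stripe; illustrative */
    #define NR_STRIPES  16

    static long long sectors_dirty[NR_STRIPES];

    /* Split [offset, offset + |nr_sectors|) on stripe boundaries and account
     * each piece to its stripe; a negative nr_sectors removes dirty sectors,
     * mirroring the loop in bcache_dev_sectors_dirty_add(). */
    static void dev_sectors_dirty_add(uint64_t offset, int nr_sectors)
    {
        unsigned stripe = offset / STRIPE_SIZE;
        unsigned stripe_offset = offset % STRIPE_SIZE;

        while (nr_sectors) {
            int s = abs(nr_sectors);

            if (s > (int)(STRIPE_SIZE - stripe_offset))
                s = STRIPE_SIZE - stripe_offset;
            if (nr_sectors < 0)
                s = -s;

            sectors_dirty[stripe] += s;

            nr_sectors -= s;
            stripe_offset = 0;
            stripe++;
        }
    }

    int main(void)
    {
        dev_sectors_dirty_add(5, 20);   /* dirty 20 sectors spanning 4 stripes */
        dev_sectors_dirty_add(8, -4);   /* clean 4 sectors of the second stripe */

        for (unsigned i = 0; i < 4; i++)
            printf("stripe %u: %lld dirty sectors\n", i, sectors_dirty[i]);
        return 0;
    }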
/drivers/block/xen-blkback/
common.h 94 uint64_t nr_sectors; member
148 uint64_t nr_sectors; member
429 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; in blkif_get_x86_32_req()
477 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; in blkif_get_x86_64_req()
blkback.c 1012 preq.nr_sects = req->u.discard.nr_sectors; in dispatch_discard_io()
1028 req->u.discard.nr_sectors, in dispatch_discard_io()
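The xen-blkback hits copy nr_sectors while translating a guest's ring request into the backend's native layout before dispatch_discard_io() consumes it. A small sketch of that field-for-field translation, with simplified stand-in structs for the 32-bit guest and native blkif request layouts (the names here are illustrative, not the real ABI definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the discard part of the 32-bit guest layout. */
    struct guest_discard_req {
        uint64_t sector_number;
        uint64_t nr_sectors;
    };

    /* Simplified stand-in for the backend's native layout. */
    struct native_discard_req {
        uint64_t sector_number;
        uint64_t nr_sectors;
    };

    /* Field-for-field copy, as blkif_get_x86_32_req()/blkif_get_x86_64_req()
     * do for the discard sub-union in common.h. */
    static void get_guest_req(struct native_discard_req *dst,
                              const struct guest_discard_req *src)
    {
        dst->sector_number = src->sector_number;
        dst->nr_sectors = src->nr_sectors;
    }

    int main(void)
    {
        struct guest_discard_req g = { .sector_number = 2048, .nr_sectors = 256 };
        struct native_discard_req n;

        get_guest_req(&n, &g);
        printf("discard %llu sectors at %llu\n",
               (unsigned long long)n.nr_sectors,
               (unsigned long long)n.sector_number);
        return 0;
    }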
/drivers/md/
dm-log-writes.c 93 __le64 nr_sectors; member
120 sector_t nr_sectors; member
245 entry.nr_sectors = cpu_to_le64(block->nr_sectors); in log_one_block()
360 lc->next_sector += block->nr_sectors + 1; in log_writes_kthread()
603 block->nr_sectors = bio_sectors(bio); in log_writes_map()
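dm-log-writes keeps nr_sectors twice: in-core in struct pending_block (filled from bio_sectors(bio) in log_writes_map()) and on disk as a little-endian 64-bit field, and the kthread advances the log head by the payload length plus one metadata sector. A host-order sketch of that bookkeeping (the on-disk struct is simplified and the cpu_to_le64() conversion is omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified on-disk entry; the real struct log_write_entry stores
     * nr_sectors as __le64 via cpu_to_le64(). */
    struct log_entry {
        uint64_t sector;
        uint64_t nr_sectors;
    };

    /* In-core per-write bookkeeping, as in struct pending_block. */
    struct pending_block {
        uint64_t sector;
        uint64_t nr_sectors;   /* set from bio_sectors(bio) in log_writes_map() */
    };

    static uint64_t next_sector = 1;   /* first free sector in the log device */

    static void log_one_block(const struct pending_block *block)
    {
        struct log_entry entry = {
            .sector = block->sector,
            .nr_sectors = block->nr_sectors,
        };

        printf("entry at log sector %llu: %llu payload sectors\n",
               (unsigned long long)next_sector,
               (unsigned long long)entry.nr_sectors);

        /* One metadata sector precedes the payload, hence the "+ 1" seen in
         * log_writes_kthread(). */
        next_sector += block->nr_sectors + 1;
    }

    int main(void)
    {
        struct pending_block b = { .sector = 4096, .nr_sectors = 8 };

        log_one_block(&b);
        log_one_block(&b);
        printf("next free log sector: %llu\n", (unsigned long long)next_sector);
        return 0;
    }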
md.h 469 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) in md_sync_acct() argument
471 atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); in md_sync_acct()
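md_sync_acct() is just an atomic accumulation of resync I/O into the disk's sync_io counter. A tiny user-space equivalent using C11 atomics (the counter here is a bare global rather than part of a gendisk):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Stand-in for the per-gendisk sync_io counter that md_sync_acct() bumps. */
    static atomic_ulong sync_io;

    static inline void sync_acct(unsigned long nr_sectors)
    {
        atomic_fetch_add(&sync_io, nr_sectors);
    }

    int main(void)
    {
        sync_acct(128);     /* e.g. one 64 KiB resync bio */
        sync_acct(64);
        printf("sync I/O so far: %lu sectors\n", atomic_load(&sync_io));
        return 0;
    }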
raid10.c 2857 sector_t max_sector, nr_sectors; in raid10_sync_request() local
3342 nr_sectors = 0; in raid10_sync_request()
3370 nr_sectors += len>>9; in raid10_sync_request()
3374 r10_bio->sectors = nr_sectors; in raid10_sync_request()
3382 r10_bio->sectors = nr_sectors; in raid10_sync_request()
3385 md_sync_acct(bio->bi_bdev, nr_sectors); in raid10_sync_request()
3397 return sectors_skipped + nr_sectors; in raid10_sync_request()
4277 int nr_sectors; in reshape_request() local
4447 nr_sectors = 0; in reshape_request()
4470 nr_sectors += len >> 9; in reshape_request()
[all …]
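In both raid10_sync_request() and raid1_sync_request() (the raid1.c hits below), the resync bio is built one page at a time and each page's byte length is converted to sectors with len >> 9 and accumulated into nr_sectors, which then becomes r10_bio->sectors / r1_bio->sectors and the value passed to md_sync_acct(). A simplified model of that accumulation loop (the window size and tail rounding are illustrative):

    #include <stdio.h>

    #define PAGE_BYTES   4096
    #define SECTOR_SHIFT 9

    int main(void)
    {
        unsigned long max_bytes  = 10000;   /* illustrative resync window */
        unsigned long done       = 0;
        unsigned long nr_sectors = 0;

        /* Add "pages" until the window is covered, converting each byte
         * length to sectors with len >> 9, as the resync loops do. */
        while (done < max_bytes) {
            unsigned long len = PAGE_BYTES;

            if (len > max_bytes - done)
                len = max_bytes - done;

            /* Round a partial tail down to whole sectors before accounting. */
            len &= ~((1UL << SECTOR_SHIFT) - 1);
            if (!len)
                break;

            nr_sectors += len >> SECTOR_SHIFT;
            done += len;
        }

        /* nr_sectors is what would land in r10_bio->sectors / r1_bio->sectors
         * and be handed to md_sync_acct(). */
        printf("queued %lu sectors for resync\n", nr_sectors);
        return 0;
    }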
raid1.c 2483 sector_t max_sector, nr_sectors; in raid1_sync_request() local
2692 nr_sectors = 0; in raid1_sync_request()
2732 nr_sectors += len>>9; in raid1_sync_request()
2737 r1_bio->sectors = nr_sectors; in raid1_sync_request()
2740 conf->cluster_sync_high < sector_nr + nr_sectors) { in raid1_sync_request()
2758 md_sync_acct(bio->bi_bdev, nr_sectors); in raid1_sync_request()
2765 md_sync_acct(bio->bi_bdev, nr_sectors); in raid1_sync_request()
2769 return nr_sectors; in raid1_sync_request()
/drivers/block/drbd/
drbd_actlog.c 864 sector_t esector, nr_sectors; in __drbd_change_sync() local
880 nr_sectors = drbd_get_capacity(device->this_bdev); in __drbd_change_sync()
883 if (!expect(sector < nr_sectors)) in __drbd_change_sync()
885 if (!expect(esector < nr_sectors)) in __drbd_change_sync()
886 esector = nr_sectors - 1; in __drbd_change_sync()
888 lbnr = BM_SECT_TO_BIT(nr_sectors-1); in __drbd_change_sync()
895 if (unlikely(esector == (nr_sectors-1))) in __drbd_change_sync()
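__drbd_change_sync() bounds the affected [sector, esector] range by the capacity reported by drbd_get_capacity() before touching the resync bitmap, clamping esector to the last sector and special-casing a range that reaches the end of the device; drbd_may_do_local_read() below performs the same capacity check. A rough sketch of that clamping, with an illustrative 8-sectors-per-bit bitmap standing in for BM_SECT_TO_BIT():

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Illustrative: one bitmap bit covers 8 sectors (4 KiB), in the spirit of
     * DRBD's BM_SECT_TO_BIT(). */
    #define SECT_TO_BIT(s) ((s) >> 3)

    /* Model of the range clamping in __drbd_change_sync(): the end of the
     * [sector, esector] range is bounded by the capacity that
     * drbd_get_capacity() reports. */
    static void change_sync(sector_t sector, unsigned int size_bytes,
                            sector_t capacity)
    {
        sector_t nr_sectors = capacity;
        sector_t esector = sector + (size_bytes >> 9) - 1;
        sector_t lbnr;

        assert(sector < nr_sectors);        /* start must be on the device */
        if (esector >= nr_sectors)
            esector = nr_sectors - 1;       /* clamp the end to the last sector */

        lbnr = SECT_TO_BIT(nr_sectors - 1); /* last valid bitmap bit */

        printf("mark sectors %llu..%llu (bitmap bits up to %llu)%s\n",
               (unsigned long long)sector, (unsigned long long)esector,
               (unsigned long long)lbnr,
               esector == nr_sectors - 1 ? " [reaches end of device]" : "");
    }

    int main(void)
    {
        change_sync(1000, 4096, 2048);      /* fits entirely on the device */
        change_sync(2040, 8192, 2048);      /* end clamped to capacity - 1 */
        return 0;
    }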
drbd_req.c 916 sector_t esector, nr_sectors; in drbd_may_do_local_read() local
923 nr_sectors = drbd_get_capacity(device->this_bdev); in drbd_may_do_local_read()
924 D_ASSERT(device, sector < nr_sectors); in drbd_may_do_local_read()
925 D_ASSERT(device, esector < nr_sectors); in drbd_may_do_local_read()
drbd_receiver.c 1467 drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, bool discard) in drbd_issue_discard_or_zero_out() argument
1488 if (nr_sectors < granularity) in drbd_issue_discard_or_zero_out()
1493 if (nr_sectors < 2*granularity) in drbd_issue_discard_or_zero_out()
1501 nr_sectors -= nr; in drbd_issue_discard_or_zero_out()
1504 while (nr_sectors >= granularity) { in drbd_issue_discard_or_zero_out()
1505 nr = min_t(sector_t, nr_sectors, max_discard_sectors); in drbd_issue_discard_or_zero_out()
1507 nr_sectors -= nr; in drbd_issue_discard_or_zero_out()
1511 if (nr_sectors) { in drbd_issue_discard_or_zero_out()
1512 err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO, 0); in drbd_issue_discard_or_zero_out()
drbd_int.h 1565 sector_t start, unsigned int nr_sectors, bool discard);
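drbd_issue_discard_or_zero_out() carves nr_sectors into pieces the backing device can handle: an unaligned head and a too-small tail are zeroed out, and everything in between is discarded in chunks capped at max_discard_sectors. A user-space model of that chunking, with fixed illustrative limits and printf() standing in for blkdev_issue_discard()/blkdev_issue_zeroout():

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Illustrative limits; the kernel reads these from the backing queue, and
     * arranges for max_discard_sectors to be granularity-aligned. */
    #define GRANULARITY          8    /* discard granularity, in sectors */
    #define MAX_DISCARD_SECTORS  32

    static void discard_or_zero_out(sector_t start, unsigned int nr_sectors)
    {
        sector_t nr;

        if (nr_sectors < GRANULARITY)
            goto zero_out;

        if (start & (GRANULARITY - 1)) {
            if (nr_sectors < 2 * GRANULARITY)
                goto zero_out;
            /* Zero out up to the next granularity boundary. */
            nr = GRANULARITY - (start & (GRANULARITY - 1));
            printf("zero %3llu sectors at %llu (align head)\n",
                   (unsigned long long)nr, (unsigned long long)start);
            nr_sectors -= nr;
            start += nr;
        }

        while (nr_sectors >= GRANULARITY) {
            nr = nr_sectors < MAX_DISCARD_SECTORS ? nr_sectors
                                                  : MAX_DISCARD_SECTORS;
            printf("trim %3llu sectors at %llu\n",
                   (unsigned long long)nr, (unsigned long long)start);
            nr_sectors -= nr;
            start += nr;
        }

    zero_out:
        if (nr_sectors)
            printf("zero %3u sectors at %llu (tail)\n",
                   nr_sectors, (unsigned long long)start);
    }

    int main(void)
    {
        discard_or_zero_out(5, 100);   /* unaligned head, aligned chunks, tail */
        return 0;
    }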
/drivers/scsi/
sd.c 720 unsigned int nr_sectors = blk_rq_sectors(rq); in sd_setup_discard_cmnd() local
728 nr_sectors >>= ilog2(sdp->sector_size) - 9; in sd_setup_discard_cmnd()
745 put_unaligned_be32(nr_sectors, &buf[16]); in sd_setup_discard_cmnd()
755 put_unaligned_be32(nr_sectors, &cmd->cmnd[10]); in sd_setup_discard_cmnd()
767 put_unaligned_be16(nr_sectors, &cmd->cmnd[7]); in sd_setup_discard_cmnd()
846 unsigned int nr_sectors = blk_rq_sectors(rq); in sd_setup_write_same_cmnd() local
856 nr_sectors >>= ilog2(sdp->sector_size) - 9; in sd_setup_write_same_cmnd()
860 if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { in sd_setup_write_same_cmnd()
864 put_unaligned_be32(nr_sectors, &cmd->cmnd[10]); in sd_setup_write_same_cmnd()
869 put_unaligned_be16(nr_sectors, &cmd->cmnd[7]); in sd_setup_write_same_cmnd()
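Both sd_setup_discard_cmnd() and sd_setup_write_same_cmnd() start from blk_rq_sectors(), which counts 512-byte sectors, rescale it to the device's logical block size with nr_sectors >>= ilog2(sector_size) - 9, and then pick a CDB wide enough for the values: a 10-byte CDB carries only a 32-bit LBA and a 16-bit block count, so larger requests use the 16-byte form. A standalone sketch of that scaling and the width decision (the cmnd[] byte offsets mirror the put_unaligned_be32/be16 calls above; the LBA bytes themselves are omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Big-endian stores into the CDB, standing in for put_unaligned_be32()
     * and put_unaligned_be16(). */
    static void put_be32(uint32_t v, uint8_t *p)
    {
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
    }

    static void put_be16(uint16_t v, uint8_t *p)
    {
        p[0] = v >> 8; p[1] = v;
    }

    static unsigned int ilog2(unsigned int v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        uint64_t lba512          = 0x1234567890ULL; /* start, 512-byte units */
        unsigned int nr_sectors  = 4096;            /* length, 512-byte units */
        unsigned int sector_size = 4096;            /* device logical block size */
        unsigned int shift       = ilog2(sector_size) - 9;
        uint8_t cdb[16] = { 0 };

        /* Rescale from 512-byte units to logical blocks, as the ">>=" in
         * sd_setup_discard_cmnd()/sd_setup_write_same_cmnd() does. */
        uint64_t lba = lba512 >> shift;
        nr_sectors >>= shift;

        /* Values too large for the 10-byte CDB force the 16-byte form. */
        if (lba > 0xffffffff || nr_sectors > 0xffff) {
            put_be32(nr_sectors, &cdb[10]);
            printf("16-byte CDB: lba=%llu, blocks=%u\n",
                   (unsigned long long)lba, nr_sectors);
        } else {
            put_be16(nr_sectors, &cdb[7]);
            printf("10-byte CDB: lba=%llu, blocks=%u\n",
                   (unsigned long long)lba, nr_sectors);
        }
        return 0;
    }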
/drivers/block/
floppy.c 2207 unsigned int nr_sectors = current_count_sectors; in floppy_end_request() local
2212 nr_sectors = blk_rq_cur_sectors(req); in floppy_end_request()
2213 if (__blk_end_request(req, error, nr_sectors << 9)) in floppy_end_request()
2277 int nr_sectors; in rw_interrupt() local
2289 nr_sectors = 0; in rw_interrupt()
2302 nr_sectors = (((R_TRACK - TRACK) * heads + in rw_interrupt()
2306 if (nr_sectors / ssize > in rw_interrupt()
2309 nr_sectors, current_count_sectors); in rw_interrupt()
2319 nr_sectors -= in_sector_offset; in rw_interrupt()
2320 INFBOUND(nr_sectors, 0); in rw_interrupt()
[all …]
xen-blkfront.c 550 ring_req->u.discard.nr_sectors = blk_rq_sectors(req); in blkif_queue_discard_req()