
Searched refs:bi_sector (Results 1 – 25 of 66) sorted by relevance


/drivers/md/
linear.c
260 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); in linear_make_request()
266 if (unlikely(bio->bi_iter.bi_sector >= end_sector || in linear_make_request()
267 bio->bi_iter.bi_sector < start_sector)) in linear_make_request()
275 bio->bi_iter.bi_sector, in linear_make_request()
282 split->bi_iter.bi_sector = split->bi_iter.bi_sector - in linear_make_request()
299 (unsigned long long)bio->bi_iter.bi_sector, in linear_make_request()
faulty.c
78 b->bi_iter.bi_sector = bio->bi_iter.bi_sector; in faulty_fail()
188 if (check_sector(conf, bio->bi_iter.bi_sector, in make_request()
192 add_sector(conf, bio->bi_iter.bi_sector, in make_request()
200 if (check_sector(conf, bio->bi_iter.bi_sector, in make_request()
206 add_sector(conf, bio->bi_iter.bi_sector, in make_request()
211 add_sector(conf, bio->bi_iter.bi_sector, in make_request()
dm-linear.c
80 static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector) in linear_map_sector() argument
84 return lc->start + dm_target_offset(ti, bi_sector); in linear_map_sector()
93 bio->bi_iter.bi_sector = in linear_map_bio()
94 linear_map_sector(ti, bio->bi_iter.bi_sector); in linear_map_bio()
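
The dm-linear hits above show the whole of that target's remapping: linear_map_sector() adds the mapped device's start sector (lc->start) to the offset of bi_sector within the target, and linear_map_bio() writes the result back into bio->bi_iter.bi_sector. A minimal userspace sketch of the same arithmetic, assuming dm_target_offset() simply subtracts the target's begin sector; the names lc_start and ti_begin are illustrative stand-ins, not the kernel structures:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Stand-in for dm_target_offset(ti, sector): the sector's offset
 * within the target, i.e. sector minus the target's begin sector. */
static sector_t target_offset(sector_t ti_begin, sector_t bi_sector)
{
    return bi_sector - ti_begin;
}

/* dm-linear style remap: backing-device start + offset in target. */
static sector_t linear_map_sector(sector_t lc_start, sector_t ti_begin,
                                  sector_t bi_sector)
{
    return lc_start + target_offset(ti_begin, bi_sector);
}

int main(void)
{
    /* bio aimed at sector 4096 of a target beginning at sector 1024,
     * backed by a device region that starts at sector 200000 */
    printf("%llu\n", (unsigned long long)
           linear_map_sector(200000, 1024, 4096)); /* prints 203072 */
    return 0;
}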
multipath.c
101 (unsigned long long)bio->bi_iter.bi_sector); in multipath_end_request()
135 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; in multipath_make_request()
341 bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; in multipathd()
347 (unsigned long long)bio->bi_iter.bi_sector); in multipathd()
353 (unsigned long long)bio->bi_iter.bi_sector); in multipathd()
355 bio->bi_iter.bi_sector += in multipathd()
raid1.c
70 sector_t bi_sector);
243 sector_t bi_sector = bio->bi_iter.bi_sector; in call_bio_endio() local
268 allow_barrier(conf, start_next_window, bi_sector); in call_bio_endio()
280 (unsigned long long) bio->bi_iter.bi_sector, in raid_end_bio_io()
479 (unsigned long long) mbio->bi_iter.bi_sector, in raid1_end_write_request()
849 <= bio->bi_iter.bi_sector)) in need_to_wait_for_sync()
887 if (bio->bi_iter.bi_sector >= conf->next_resync) { in wait_barrier()
894 <= bio->bi_iter.bi_sector) in wait_barrier()
908 sector_t bi_sector) in allow_barrier() argument
917 <= bi_sector) in allow_barrier()
[all …]
dm-flakey.c
247 static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector) in flakey_map_sector() argument
251 return fc->start + dm_target_offset(ti, bi_sector); in flakey_map_sector()
260 bio->bi_iter.bi_sector = in flakey_map_bio()
261 flakey_map_sector(ti, bio->bi_iter.bi_sector); in flakey_map_bio()
279 (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); in corrupt_bio_data()
dm-stripe.c
266 stripe_map_range_sector(sc, bio->bi_iter.bi_sector, in stripe_map_range()
272 bio->bi_iter.bi_sector = begin + in stripe_map_range()
302 stripe_map_sector(sc, bio->bi_iter.bi_sector, in stripe_map()
303 &stripe, &bio->bi_iter.bi_sector); in stripe_map()
305 bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; in stripe_map()
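
The dm-stripe hits show stripe_map_sector() translating bi_sector into a stripe number plus a sector on that stripe's backing device, after which stripe_map() adds the stripe's physical_start. A rough userspace model of the round-robin chunk arithmetic; this is a sketch of the idea, not the kernel's exact shift-based implementation, and the variable names are illustrative:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Split a target-relative sector into (stripe index, device sector)
 * for 'stripes' devices striped in chunks of 'chunk_sects' sectors. */
static void stripe_map_sector(sector_t offset, unsigned stripes,
                              sector_t chunk_sects,
                              unsigned *stripe, sector_t *dev_sector)
{
    sector_t chunk = offset / chunk_sects;    /* which chunk overall */
    sector_t in_chunk = offset % chunk_sects; /* offset inside the chunk */

    *stripe = chunk % stripes;                /* which device holds it */
    *dev_sector = (chunk / stripes) * chunk_sects + in_chunk;
}

int main(void)
{
    unsigned stripe;
    sector_t dev_sector;

    /* sector 1000 of a 4-way stripe with 128-sector chunks */
    stripe_map_sector(1000, 4, 128, &stripe, &dev_sector);
    printf("stripe %u, device sector %llu\n",
           stripe, (unsigned long long)dev_sector); /* stripe 3, sector 232 */
    return 0;
}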
dm-delay.c
288 bio->bi_iter.bi_sector = dc->start_write + in delay_map()
289 dm_target_offset(ti, bio->bi_iter.bi_sector); in delay_map()
295 bio->bi_iter.bi_sector = dc->start_read + in delay_map()
296 dm_target_offset(ti, bio->bi_iter.bi_sector); in delay_map()
raid10.c
1087 bio->bi_iter.bi_sector < conf->reshape_progress && in __make_request()
1088 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request()
1094 conf->reshape_progress <= bio->bi_iter.bi_sector || in __make_request()
1095 conf->reshape_progress >= bio->bi_iter.bi_sector + in __make_request()
1102 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in __make_request()
1103 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request()
1104 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request()
1105 bio->bi_iter.bi_sector < conf->reshape_progress))) { in __make_request()
1123 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1152 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
[all …]
dm-stats.c
589 sector_t bi_sector, sector_t end_sector, in __dm_stat_bio() argument
596 if (end_sector <= s->start || bi_sector >= s->end) in __dm_stat_bio()
598 if (unlikely(bi_sector < s->start)) { in __dm_stat_bio()
602 rel_sector = bi_sector - s->start; in __dm_stat_bio()
603 todo = end_sector - bi_sector; in __dm_stat_bio()
627 sector_t bi_sector, unsigned bi_sectors, bool end, in dm_stats_account_io() argument
639 end_sector = bi_sector + bi_sectors; in dm_stats_account_io()
648 (bi_sector == (ACCESS_ONCE(last->last_sector) && in dm_stats_account_io()
667 __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux); in dm_stats_account_io()
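
In the dm-stats hits, __dm_stat_bio() first rejects bios that do not overlap the statistics region (end_sector <= s->start or bi_sector >= s->end) and then accounts only the overlapping part, starting at rel_sector with todo sectors to go. A small sketch of that interval-clamping step, assuming a half-open region [region_start, region_end); the names are illustrative, not the kernel's struct fields:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t sector_t;

/* Return true and the overlap of [bi_sector, end_sector) with the
 * region [region_start, region_end); false if they do not intersect. */
static bool clamp_to_region(sector_t bi_sector, sector_t end_sector,
                            sector_t region_start, sector_t region_end,
                            sector_t *rel_sector, sector_t *todo)
{
    if (end_sector <= region_start || bi_sector >= region_end)
        return false;                       /* bio entirely outside */

    if (bi_sector < region_start)
        bi_sector = region_start;           /* clip the leading part */
    if (end_sector > region_end)
        end_sector = region_end;            /* clip the trailing part */

    *rel_sector = bi_sector - region_start; /* offset within the region */
    *todo = end_sector - bi_sector;         /* sectors to account */
    return true;
}

int main(void)
{
    sector_t rel, todo;

    /* bio [90, 150) against region [100, 200): overlap is 50 sectors */
    if (clamp_to_region(90, 150, 100, 200, &rel, &todo))
        printf("rel_sector=%llu todo=%llu\n",
               (unsigned long long)rel, (unsigned long long)todo);
    return 0;
}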
raid0.c
446 ((bio->bi_iter.bi_sector & (chunk_sects-1)) in is_io_in_chunk_boundary()
449 sector_t sector = bio->bi_iter.bi_sector; in is_io_in_chunk_boundary()
467 sector_t sector = bio->bi_iter.bi_sector; in raid0_make_request()
476 sector = bio->bi_iter.bi_sector; in raid0_make_request()
488 split->bi_iter.bi_sector = sector + zone->dev_start + in raid0_make_request()
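
The raid0.c hits show is_io_in_chunk_boundary() testing whether a bio stays inside one chunk; for a power-of-two chunk size this is the masked test at line 446: the offset of bi_sector within its chunk plus the bio's sector count must not exceed chunk_sects. A compact model of that check, with example sector counts chosen arbitrarily:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t sector_t;

/* Power-of-two chunk case: does [sector, sector + sectors) stay
 * within a single chunk of chunk_sects sectors? */
static bool io_in_chunk_boundary(sector_t sector, unsigned sectors,
                                 sector_t chunk_sects)
{
    return (sector & (chunk_sects - 1)) + sectors <= chunk_sects;
}

int main(void)
{
    /* An 8-sector bio at sector 120 fits in a 128-sector chunk;
     * the same bio at sector 124 would cross into the next chunk. */
    printf("%d %d\n",
           io_in_chunk_boundary(120, 8, 128),
           io_in_chunk_boundary(124, 8, 128)); /* prints "1 0" */
    return 0;
}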
dm-verity-avb.c
38 bio->bi_iter.bi_sector = 0; in invalidate_vbmeta_submit()
43 bio->bi_iter.bi_sector = last_sector; in invalidate_vbmeta_submit()
dm-thin.c
372 bio->bi_iter.bi_sector = sector; in __blkdev_issue_discard_async()
645 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
662 sector_t b = bio->bi_iter.bi_sector; in get_bio_block_range()
686 sector_t bi_sector = bio->bi_iter.bi_sector; in remap() local
690 bio->bi_iter.bi_sector = in remap()
692 (bi_sector & (pool->sectors_per_block - 1)); in remap()
694 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
695 sector_div(bi_sector, pool->sectors_per_block); in remap()
1887 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell()
1889 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
[all …]
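
The dm-thin remap() hits show the same remapping idea with a twist: the incoming bi_sector is replaced by the mapped data block's start plus the offset inside the block, using a mask when sectors_per_block is a power of two and a division otherwise (sector_div() in the kernel returns the remainder). A userspace sketch of both branches; block and sectors_per_block below are just example values, not taken from the kernel code:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Remap bi_sector into data block 'block': block start plus the
 * offset of bi_sector within a block of sectors_per_block sectors. */
static sector_t thin_remap(sector_t bi_sector, sector_t block,
                           sector_t sectors_per_block)
{
    if ((sectors_per_block & (sectors_per_block - 1)) == 0)
        /* power-of-two block size: cheap mask, like the fast path above */
        return block * sectors_per_block +
               (bi_sector & (sectors_per_block - 1));

    /* general case: remainder via division (sector_div() in the kernel) */
    return block * sectors_per_block + (bi_sector % sectors_per_block);
}

int main(void)
{
    /* sector 1000 falls 488 sectors into its 512-sector block;
     * remapped into data block 7 it becomes 7*512 + 488 = 4072 */
    printf("%llu\n", (unsigned long long)thin_remap(1000, 7, 512));
    return 0;
}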
dm-stats.h
33 sector_t bi_sector, unsigned bi_sectors, bool end,
dm-verity-target.c
80 static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector) in verity_map_sector() argument
82 return v->data_start + dm_target_offset(v->ti, bi_sector); in verity_map_sector()
588 bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); in verity_map()
590 if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & in verity_map()
608 io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); in verity_map()
dm-log-writes.c
204 bio->bi_iter.bi_sector = sector; in write_metadata()
268 bio->bi_iter.bi_sector = sector; in log_one_block()
289 bio->bi_iter.bi_sector = sector; in log_one_block()
603 block->sector = bio->bi_iter.bi_sector; in log_writes_map()
raid5.c
140 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) in r5_next_bio()
1016 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1019 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1068 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1071 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1122 if (bio->bi_iter.bi_sector >= sector) in async_copy_data()
1123 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; in async_copy_data()
1125 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; in async_copy_data()
1197 while (rbi && rbi->bi_iter.bi_sector < in ops_complete_biofill()
1232 while (rbi && rbi->bi_iter.bi_sector < in ops_run_biofill()
[all …]
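
In the raid5.c hits, async_copy_data() (source lines 1122-1125 above) turns the difference between the bio's bi_sector and the stripe sector into a signed byte offset: positive when the bio starts past the stripe sector, negative when it starts before it. The same arithmetic in isolation, with 512 as the sector size in bytes:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Signed byte offset of the bio's start relative to the stripe sector. */
static int page_offset_bytes(sector_t bi_sector, sector_t stripe_sector)
{
    if (bi_sector >= stripe_sector)
        return (int)(bi_sector - stripe_sector) * 512;
    return (int)(stripe_sector - bi_sector) * -512;
}

int main(void)
{
    printf("%d %d\n",
           page_offset_bytes(1032, 1024),  /*  4096: bio starts 8 sectors in */
           page_offset_bytes(1016, 1024)); /* -4096: bio starts 8 sectors early */
    return 0;
}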
dm-raid1.c
449 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; in mirror_available()
461 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); in map_sector()
467 bio->bi_iter.bi_sector = map_sector(m, bio); in map_bio()
578 m = choose_mirror(ms, bio->bi_iter.bi_sector); in do_reads()
1228 m = choose_mirror(ms, bio->bi_iter.bi_sector); in mirror_map()
/drivers/md/bcache/
request.c
124 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); in bch_data_invalidate()
133 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate()
137 &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); in bch_data_invalidate()
227 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); in bch_data_insert_start()
387 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || in check_should_bypass()
411 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) in check_should_bypass()
412 if (i->last == bio->bi_iter.bi_sector && in check_should_bypass()
514 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) in cache_lookup_fn()
518 KEY_START(k) > bio->bi_iter.bi_sector) { in cache_lookup_fn()
522 KEY_START(k) - bio->bi_iter.bi_sector) in cache_lookup_fn()
[all …]
debug.c
53 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in bch_btree_verify()
132 (uint64_t) bio->bi_iter.bi_sector); in bch_data_verify()
writeback.h
72 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, in should_writeback()
/drivers/s390/block/
xpram.c
195 if ((bio->bi_iter.bi_sector & 7) != 0 || in xpram_make_request()
202 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) in xpram_make_request()
204 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; in xpram_make_request()
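
The xpram.c hits show a 512-byte sector number being converted to a 4 KiB page index: the request must be 4 KiB aligned ((bi_sector & 7) == 0), and the index is bi_sector >> 3 plus the device offset, checked against a 32-bit limit. A tiny model of that conversion; xdev_offset is an illustrative stand-in for xdev->offset:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Convert a 512-byte sector number into a 4 KiB page index on the
 * xpram partition, returning -1 if unaligned or out of range. */
static long long xpram_page_index(sector_t bi_sector, uint32_t xdev_offset)
{
    if (bi_sector & 7)                               /* not 4 KiB aligned */
        return -1;
    if ((bi_sector >> 3) > 0xffffffffULL - xdev_offset)
        return -1;                                   /* index would overflow 32 bits */
    return (long long)((bi_sector >> 3) + xdev_offset);
}

int main(void)
{
    printf("%lld %lld\n",
           xpram_page_index(4096, 100),  /* 4096/8 + 100 = 612 */
           xpram_page_index(4100, 100)); /* unaligned: -1 */
    return 0;
}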
/drivers/block/
pktcdvd.c
655 if (s <= tmp->bio->bi_iter.bi_sector) in pkt_rbtree_find()
664 if (s > tmp->bio->bi_iter.bi_sector) { in pkt_rbtree_find()
669 BUG_ON(s > tmp->bio->bi_iter.bi_sector); in pkt_rbtree_find()
680 sector_t s = node->bio->bi_iter.bi_sector; in pkt_rbtree_insert()
686 if (s < tmp->bio->bi_iter.bi_sector) in pkt_rbtree_insert()
865 if (bio && (bio->bi_iter.bi_sector == in pkt_iosched_process_queue()
988 (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error); in pkt_end_io_read()
1036 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / in pkt_gather_data()
1064 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); in pkt_gather_data()
1161 pkt->bio->bi_iter.bi_sector = new_sector; in pkt_start_recovery()
[all …]
/drivers/target/
target_core_iblock.c
335 bio->bi_iter.bi_sector = lba; in iblock_get_bio()
612 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; in iblock_alloc_bip()
615 (unsigned long long)bip->bip_iter.bi_sector); in iblock_alloc_bip()
/drivers/nvdimm/
blk.c
196 bvec.bv_offset, rw, iter.bi_sector); in nd_blk_make_request()
201 (unsigned long long) iter.bi_sector, len); in nd_blk_make_request()
