/drivers/md/
dm-bow.c
    110  return bi_iter->bi_sector + bi_iter->bi_size / SECTOR_SIZE;  in bvec_top()
    126  if (br->sector <= bi_iter->bi_sector  in find_first_overlapping_range()
    127  && bi_iter->bi_sector < range_top(br))  in find_first_overlapping_range()
    130  if (bi_iter->bi_sector < br->sector)  in find_first_overlapping_range()
    140  if (range_top(br) - bi_iter->bi_sector  in find_first_overlapping_range()
    142  bi_iter->bi_size = (range_top(br) - bi_iter->bi_sector)  in find_first_overlapping_range()
    176  if (bi_iter->bi_sector < (*br)->sector) {  in split_range()
    181  if (bi_iter->bi_sector > (*br)->sector) {  in split_range()
    193  (*br)->sector = bi_iter->bi_sector;  in split_range()
    351  bi_iter.bi_sector = free_br->sector;  in backup_log_sector()
    [all …]

dm-ebs-target.c
    49   sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);  in __nr_blocks()
    70   unsigned int buf_off = to_bytes(__block_mod(iter->bi_sector, ec->u_bs));  in __ebs_rw_bvec()
    71   sector_t block = __sector_to_block(ec, iter->bi_sector);  in __ebs_rw_bvec()
    143  sector_t block, blocks, sector = bio->bi_iter.bi_sector;  in __ebs_discard_bio()
    167  sector_t blocks, sector = bio->bi_iter.bi_sector;  in __ebs_forget_bio()
    193  block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);  in __ebs_process_bios()
    198  if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))  in __ebs_process_bios()
    364  bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);  in ebs_map()
    373  if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||  in ebs_map()

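The __block_mod()/__nr_blocks() arithmetic above determines how many underlying blocks of ec->u_bs sectors a bio spans once its misaligned start is accounted for. A minimal user-space sketch of that calculation, assuming __block_mod(s, bs) is simply s % bs; the nr_blocks() harness and sample geometry are hypothetical:

#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Sketch of dm-ebs's block counting: how many underlying blocks of
 * u_bs sectors a bio touches, including the misalignment of its start.
 */
static sector_t nr_blocks(sector_t bi_sector, sector_t bio_sectors,
                          sector_t u_bs)
{
        sector_t end_sector = (bi_sector % u_bs) + bio_sectors;

        return end_sector / u_bs + (end_sector % u_bs ? 1 : 0);
}

int main(void)
{
        /* 8-sector blocks: an aligned 8-sector bio touches one block... */
        assert(nr_blocks(0, 8, 8) == 1);
        /* ...but the same bio starting 4 sectors in straddles two. */
        assert(nr_blocks(4, 8, 8) == 2);
        /* An aligned 16-sector bio spans exactly two blocks. */
        assert(nr_blocks(0, 16, 8) == 2);
        printf("ok\n");
        return 0;
}

The misalignment term is why line 198 above checks __block_mod() on the bio's start before deciding whether the bio can be passed through directly.
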
md-faulty.c
    69   b->bi_iter.bi_sector = bio->bi_iter.bi_sector;  in faulty_fail()
    179  if (check_sector(conf, bio->bi_iter.bi_sector,  in faulty_make_request()
    183  add_sector(conf, bio->bi_iter.bi_sector,  in faulty_make_request()
    191  if (check_sector(conf, bio->bi_iter.bi_sector,  in faulty_make_request()
    197  add_sector(conf, bio->bi_iter.bi_sector,  in faulty_make_request()
    202  add_sector(conf, bio->bi_iter.bi_sector,  in faulty_make_request()

md-multipath.c
    94   (unsigned long long)bio->bi_iter.bi_sector);  in multipath_end_request()
    127  mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;  in multipath_make_request()
    311  bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;  in multipathd()
    316  (unsigned long long)bio->bi_iter.bi_sector);  in multipathd()
    321  (unsigned long long)bio->bi_iter.bi_sector);  in multipathd()
    323  bio->bi_iter.bi_sector +=  in multipathd()

dm-zone.c
    396  if ((clone->bi_iter.bi_sector & (zsectors - 1)) != zwp_offset)  in dm_zone_map_bio_begin()
    407  clone->bi_iter.bi_sector =  in dm_zone_map_bio_begin()
    408  orig_bio->bi_iter.bi_sector + zwp_offset;  in dm_zone_map_bio_begin()
    620  orig_bio->bi_iter.bi_sector +=  in dm_zone_endio()
    621  clone->bi_iter.bi_sector & mask;  in dm_zone_endio()
    655  orig_bio->bi_iter.bi_sector +=  in dm_zone_endio()

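The check at line 396 compares the bio's offset inside its zone against the cached write-pointer offset; since zone sizes are a power of two in sectors, the in-zone offset is a simple mask. Lines 407-408 show the companion remap used for zone-append emulation. A sketch of just the mask check, with a hypothetical standalone harness:

#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Sketch of dm-zone's write-position test: a regular write into a
 * sequential zone is only valid if its offset within the zone equals
 * the zone's current write-pointer offset (zwp_offset).
 */
static int write_at_write_pointer(sector_t bi_sector, sector_t zone_sectors,
                                  sector_t zwp_offset)
{
        return (bi_sector & (zone_sectors - 1)) == zwp_offset;
}

int main(void)
{
        /* 524288-sector zones: a write at zone start + 100 needs zwp 100. */
        assert(write_at_write_pointer(524288 + 100, 524288, 100));
        assert(!write_at_write_pointer(524288 + 100, 524288, 96));
        printf("ok\n");
        return 0;
}
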
md-linear.c
    221  sector_t bio_sector = bio->bi_iter.bi_sector;  in linear_make_request()
    252  bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -  in linear_make_request()
    272  (unsigned long long)bio->bi_iter.bi_sector,  in linear_make_request()

dm-linear.c
    81   static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)  in linear_map_sector() argument
    85   return lc->start + dm_target_offset(ti, bi_sector);  in linear_map_sector()
    94   bio->bi_iter.bi_sector =  in linear_map_bio()
    95   linear_map_sector(ti, bio->bi_iter.bi_sector);  in linear_map_bio()

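linear_map_sector() is the canonical sector remap: subtract the target's start within the mapped device (dm_target_offset()) and add the backing device's offset. dm-flakey's flakey_map_sector() below is identical in shape, and the md targets above do the same thing with rdev->data_offset. A user-space sketch of the arithmetic; the struct fields mirror the kernel's lc->start and ti->begin, but the test harness is hypothetical:

#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct dm_target { sector_t begin; };  /* target's first sector in the mapped device */
struct linear_c  { sector_t start; };  /* backing-device offset from the table line   */

/* dm_target_offset(ti, sector): position of the bio within this target. */
static sector_t dm_target_offset(const struct dm_target *ti, sector_t bi_sector)
{
        return bi_sector - ti->begin;
}

/* Mirrors linear_map_sector(): rebase the sector onto the backing device. */
static sector_t linear_map_sector(const struct dm_target *ti,
                                  const struct linear_c *lc, sector_t bi_sector)
{
        return lc->start + dm_target_offset(ti, bi_sector);
}

int main(void)
{
        struct dm_target ti = { .begin = 2048 };
        struct linear_c lc = { .start = 8192 };

        /* A bio arriving at sector 2048 + 100 lands at 8192 + 100. */
        assert(linear_map_sector(&ti, &lc, 2148) == 8292);
        printf("mapped: %llu\n", linear_map_sector(&ti, &lc, 2148));
        return 0;
}
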
dm-flakey.c
    271  static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)  in flakey_map_sector() argument
    275  return fc->start + dm_target_offset(ti, bi_sector);  in flakey_map_sector()
    284  bio->bi_iter.bi_sector =  in flakey_map_bio()
    285  flakey_map_sector(ti, bio->bi_iter.bi_sector);  in flakey_map_bio()
    315  (unsigned long long)bio->bi_iter.bi_sector, bio->bi_iter.bi_size);  in corrupt_bio_data()

dm-stripe.c
    256  stripe_map_range_sector(sc, bio->bi_iter.bi_sector,  in stripe_map_range()
    262  bio->bi_iter.bi_sector = begin +  in stripe_map_range()
    294  stripe_map_sector(sc, bio->bi_iter.bi_sector,  in stripe_map()
    295  &stripe, &bio->bi_iter.bi_sector);  in stripe_map()
    297  bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;  in stripe_map()

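stripe_map_sector() splits a logical sector into a stripe index and a sector within that stripe; line 297 then rebases the result onto the chosen stripe's physical_start. A simplified sketch of the mapping, using plain division for clarity; the kernel also has a shift/mask fast path for power-of-two chunk sizes, and this signature is a hypothetical reduction of the real one:

#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Simplified striping: split a logical sector into
 * (stripe index, sector on that stripe). chunk_size is in sectors.
 */
static void stripe_map_sector(sector_t sector, unsigned int stripes,
                              sector_t chunk_size,
                              unsigned int *stripe, sector_t *result)
{
        sector_t chunk = sector / chunk_size;   /* which chunk overall */
        sector_t offset = sector % chunk_size;  /* offset inside chunk */

        *stripe = chunk % stripes;              /* device it lands on  */
        *result = (chunk / stripes) * chunk_size + offset;
}

int main(void)
{
        unsigned int stripe;
        sector_t result;

        /* 3 stripes, 128-sector chunks: sector 300 is in chunk 2, stripe 2. */
        stripe_map_sector(300, 3, 128, &stripe, &result);
        assert(stripe == 2 && result == 300 - 2 * 128);
        printf("stripe=%u sector=%llu\n", stripe, result);
        return 0;
}
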
dm-stats.c
    618  sector_t bi_sector, sector_t end_sector,  in __dm_stat_bio() argument
    625  if (end_sector <= s->start || bi_sector >= s->end)  in __dm_stat_bio()
    627  if (unlikely(bi_sector < s->start)) {  in __dm_stat_bio()
    631  rel_sector = bi_sector - s->start;  in __dm_stat_bio()
    632  todo = end_sector - bi_sector;  in __dm_stat_bio()
    656  sector_t bi_sector, unsigned bi_sectors, bool end,  in dm_stats_account_io() argument
    669  end_sector = bi_sector + bi_sectors;  in dm_stats_account_io()
    678  (bi_sector == (READ_ONCE(last->last_sector) &&  in dm_stats_account_io()
    697  __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);  in dm_stats_account_io()

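Lines 625-632 clip the bio's sector range [bi_sector, end_sector) against a stats region before accounting. A sketch of that intersection, returning the overlap length; this is a hypothetical standalone reduction of __dm_stat_bio()'s early-return and clamping logic:

#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * A bio covering [bi_sector, end_sector) contributes only the sectors
 * that fall inside the stats region [start, end). Zero means the bio
 * is skipped, as in the kernel's early return.
 */
static sector_t stat_overlap(sector_t bi_sector, sector_t end_sector,
                             sector_t start, sector_t end)
{
        if (end_sector <= start || bi_sector >= end)
                return 0;
        if (bi_sector < start)
                bi_sector = start;
        if (end_sector > end)
                end_sector = end;
        return end_sector - bi_sector;
}

int main(void)
{
        assert(stat_overlap(0, 100, 50, 200) == 50);    /* clipped at start */
        assert(stat_overlap(150, 300, 50, 200) == 50);  /* clipped at end   */
        assert(stat_overlap(0, 40, 50, 200) == 0);      /* no overlap       */
        printf("ok\n");
        return 0;
}
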
dm-unstripe.c
    119  sector_t sector = bio->bi_iter.bi_sector;  in map_to_core()
    139  bio->bi_iter.bi_sector = map_to_core(ti, bio) + uc->physical_start;  in unstripe_map()

dm-dust.c
    230  bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);  in dust_map()
    233  r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);  in dust_map()
    235  r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);  in dust_map()

raid0.c
    477  sector_t start = bio->bi_iter.bi_sector;  in raid0_handle_discard()
    493  zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,  in raid0_handle_discard()
    578  sector_t bio_sector = bio->bi_iter.bi_sector;  in raid0_map_submit_bio()
    604  bio->bi_iter.bi_sector = sector + zone->dev_start +  in raid0_map_submit_bio()
    630  sector = bio->bi_iter.bi_sector;  in raid0_make_request()

dm-writecache.c
    1299  writecache_discard(wc, bio->bi_iter.bi_sector,  in writecache_flush_thread()
    1336  read_original_sector(wc, e) - bio->bi_iter.bi_sector;  in writecache_map_remap_origin()
    1349  e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);  in writecache_map_read()
    1350  if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {  in writecache_map_read()
    1360  bio->bi_iter.bi_sector = cache_sector(wc, e);  in writecache_map_read()
    1386  write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +  in writecache_bio_copy_ssd()
    1412  bio->bi_iter.bi_sector = start_cache_sec;  in writecache_bio_copy_ssd()
    1437  e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);  in writecache_map_write()
    1460  e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);  in writecache_map_write()
    1470  write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);  in writecache_map_write()
    [all …]

raid10.c
    1138  bio->bi_iter.bi_sector < conf->reshape_progress &&  in regular_request_wait()
    1139  bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {  in regular_request_wait()
    1147  conf->reshape_progress <= bio->bi_iter.bi_sector ||  in regular_request_wait()
    1148  conf->reshape_progress >= bio->bi_iter.bi_sector +  in regular_request_wait()
    1235  read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +  in raid10_read_request()
    1283  mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +  in raid10_write_one_disk()
    1412  bio->bi_iter.bi_sector,  in raid10_write_request()
    1424  bio->bi_iter.bi_sector, bio_end_sector(bio)))  in raid10_write_request()
    1436  ? (bio->bi_iter.bi_sector < conf->reshape_safe &&  in raid10_write_request()
    1437  bio->bi_iter.bi_sector + sectors > conf->reshape_progress)  in raid10_write_request()
    [all …]

dm-thin.c
    680   sector_t block_nr = bio->bi_iter.bi_sector;  in get_bio_block()
    697   sector_t b = bio->bi_iter.bi_sector;  in get_bio_block_range()
    721   sector_t bi_sector = bio->bi_iter.bi_sector;  in remap() local
    725   bio->bi_iter.bi_sector =  in remap()
    727   (bi_sector & (pool->sectors_per_block - 1));  in remap()
    729   bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +  in remap()
    730   sector_div(bi_sector, pool->sectors_per_block);  in remap()
    1971  else if (bio->bi_iter.bi_sector < tc->origin_size) {  in process_cell()
    1973  bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;  in process_cell()
    2109  sector_t bi_sector = bio->bi_iter.bi_sector;  in __thin_bio_rb_add() local
    [all …]

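remap() at lines 721-730 keeps the bio's offset within its thin block and rebases it onto the mapped data block, using a mask when sectors_per_block is a power of two and sector_div() otherwise. A sketch covering both branches; the pow2 flag stands in for the kernel's sectors_per_block_shift test, and the harness is hypothetical:

#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;
typedef unsigned long long dm_block_t;

/*
 * Sketch of dm-thin's remap(): preserve the in-block offset and
 * rebase onto the mapped data block. The mask path is only valid
 * when sectors_per_block is a power of two.
 */
static sector_t remap_sector(sector_t bi_sector, dm_block_t block,
                             sector_t sectors_per_block, int pow2)
{
        if (pow2)
                return (block * sectors_per_block) +
                       (bi_sector & (sectors_per_block - 1));
        return (block * sectors_per_block) + (bi_sector % sectors_per_block);
}

int main(void)
{
        /* 128 sectors/block (power of two): sector 300 -> offset 44. */
        assert(remap_sector(300, 7, 128, 1) == 7 * 128 + 44);
        /* 100 sectors/block (not a power of two): divide path. */
        assert(remap_sector(300, 7, 100, 0) == 7 * 100 + 0);
        printf("ok\n");
        return 0;
}
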
dm-stats.h
    33   sector_t bi_sector, unsigned bi_sectors, bool end,

raid1.c
    317   (unsigned long long) bio->bi_iter.bi_sector,  in raid_end_bio_io()
    543   (unsigned long long) mbio->bi_iter.bi_sector,  in raid1_end_write_request()
    1186  r1_bio->sector = bio->bi_iter.bi_sector;  in init_r1bio()
    1239  wait_read_barrier(conf, bio->bi_iter.bi_sector);  in raid1_read_request()
    1302  read_bio->bi_iter.bi_sector = r1_bio->sector +  in raid1_read_request()
    1336  bio->bi_iter.bi_sector, bio_end_sector(bio))) {  in raid1_write_request()
    1343  bio->bi_iter.bi_sector,  in raid1_write_request()
    1356  wait_barrier(conf, bio->bi_iter.bi_sector);  in raid1_write_request()
    1454  allow_barrier(conf, bio->bi_iter.bi_sector);  in raid1_write_request()
    1457  wait_barrier(conf, bio->bi_iter.bi_sector);  in raid1_write_request()
    [all …]

dm-zoned.h
    45   #define dmz_bio_block(bio) dmz_sect2blk((bio)->bi_iter.bi_sector)
    84   #define dmz_bio_chunk(zmd, bio) ((bio)->bi_iter.bi_sector >> \

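dmz_bio_block() converts a bio's starting sector to a 4 KiB metadata block, and dmz_bio_chunk() shifts it down to a zone-sized chunk index. A sketch with example geometry; the 256 MiB zone size is illustrative, not taken from the header:

#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/* 512-byte sectors, 4 KiB blocks: 8 sectors per block. */
#define SECTOR_SHIFT            9
#define DMZ_BLOCK_SHIFT         12
#define DMZ_BLOCK_SECTORS_SHIFT (DMZ_BLOCK_SHIFT - SECTOR_SHIFT)

/* Sketch of dmz_sect2blk()/dmz_bio_block(): sector to 4 KiB block. */
static sector_t dmz_sect2blk(sector_t sect)
{
        return sect >> DMZ_BLOCK_SECTORS_SHIFT;
}

/*
 * Sketch of dmz_bio_chunk(): the chunk (zone-sized unit) a bio starts
 * in, assuming zone_nr_sectors_shift is log2 of the zone size in sectors.
 */
static unsigned int dmz_bio_chunk(sector_t bi_sector,
                                  unsigned int zone_nr_sectors_shift)
{
        return bi_sector >> zone_nr_sectors_shift;
}

int main(void)
{
        assert(dmz_sect2blk(16) == 2);  /* sector 16 -> block 2 */
        /* 256 MiB zones = 524288 sectors, shift 19. */
        assert(dmz_bio_chunk(524288 + 7, 19) == 1);
        printf("ok\n");
        return 0;
}
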
dm-io.c
    219  dp->context_bi.bi_sector = (sector_t)bvec.bv_len;  in bio_get_page()
    224  unsigned int len = (unsigned int)dp->context_bi.bi_sector;  in bio_next_page()
    349  bio->bi_iter.bi_sector = where->sector + (where->count - remaining);  in do_region()

/drivers/md/bcache/
request.c
    114  bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);  in bch_data_invalidate()
    123  bio->bi_iter.bi_sector += sectors;  in bch_data_invalidate()
    128  bio->bi_iter.bi_sector,  in bch_data_invalidate()
    220  SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);  in bch_data_insert_start()
    397  if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||  in check_should_bypass()
    416  hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)  in check_should_bypass()
    417  if (i->last == bio->bi_iter.bi_sector &&  in check_should_bypass()
    520  if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)  in cache_lookup_fn()
    524  KEY_START(k) > bio->bi_iter.bi_sector) {  in cache_lookup_fn()
    528  KEY_START(k) - bio->bi_iter.bi_sector)  in cache_lookup_fn()
    [all …]

debug.c
    54   bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);  in bch_btree_verify()
    119  check->bi_iter.bi_sector = bio->bi_iter.bi_sector;  in bch_data_verify()
    142  (uint64_t) bio->bi_iter.bi_sector);  in bch_data_verify()

/drivers/nvdimm/
nd_virtio.c
    107  if (bio && bio->bi_iter.bi_sector != -1) {  in async_pmem_flush()
    114  child->bi_iter.bi_sector = -1;  in async_pmem_flush()

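async_pmem_flush() uses bi_sector == -1 as an in-band sentinel: since sector_t is unsigned, -1 (all ones) marks a child bio that carries no data and exists only to order a flush. A sketch of the convention; the types and harness are hypothetical, only the sentinel test mirrors the lines above:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct bio_iter { sector_t bi_sector; };
struct bio { struct bio_iter bi_iter; };

/* A bio with a real starting sector carries data; (sector_t)-1 means
 * "pure flush, no data", matching the checks at lines 107 and 114. */
static bool bio_has_data_sector(const struct bio *bio)
{
        return bio && bio->bi_iter.bi_sector != (sector_t)-1;
}

int main(void)
{
        struct bio parent = { .bi_iter = { .bi_sector = 2048 } };
        struct bio child  = { .bi_iter = { .bi_sector = (sector_t)-1 } };

        printf("parent has data: %d\n", bio_has_data_sector(&parent));   /* 1 */
        printf("child is pure flush: %d\n", !bio_has_data_sector(&child)); /* 1 */
        return 0;
}
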
/drivers/block/
pktcdvd.c
    655   if (s <= tmp->bio->bi_iter.bi_sector)  in pkt_rbtree_find()
    664   if (s > tmp->bio->bi_iter.bi_sector) {  in pkt_rbtree_find()
    669   BUG_ON(s > tmp->bio->bi_iter.bi_sector);  in pkt_rbtree_find()
    680   sector_t s = node->bio->bi_iter.bi_sector;  in pkt_rbtree_insert()
    686   if (s < tmp->bio->bi_iter.bi_sector)  in pkt_rbtree_insert()
    865   if (bio && (bio->bi_iter.bi_sector ==  in pkt_iosched_process_queue()
    955   (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);  in pkt_end_io_read()
    1003  int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /  in pkt_gather_data()
    1031  bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);  in pkt_gather_data()
    1132  zone = get_zone(bio->bi_iter.bi_sector, pd);  in pkt_handle_queue()
    [all …]

/drivers/nvme/target/
io-cmd-bdev.c
    202  bip_set_seed(bip, bio->bi_iter.bi_sector >>  in nvmet_bdev_alloc_bip()
    274  bio->bi_iter.bi_sector = sector;  in nvmet_bdev_execute_rw()
    300  bio->bi_iter.bi_sector = sector;  in nvmet_bdev_execute_rw()