Searched refs:bv (Results 1 – 8 of 8) sorted by relevance
/block/
D | bio.c |
      157  void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)  in bvec_free() argument
      162  mempool_free(bv, pool);  in bvec_free()
      164  kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);  in bvec_free()
      534  struct bio_vec bv;  in zero_fill_bio() local
      537  bio_for_each_segment(bv, bio, iter)  in zero_fill_bio()
      538  memzero_bvec(&bv);  in zero_fill_bio()
      554  struct bio_vec bv;  in bio_truncate() local
      565  bio_for_each_segment(bv, bio, iter) {  in bio_truncate()
      566  if (done + bv.bv_len > new_size) {  in bio_truncate()
      573  zero_user(bv.bv_page, bv.bv_offset + offset,  in bio_truncate()
      [all …]
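The zero_fill_bio() and bio_truncate() hits above are the canonical segment walk: bio_for_each_segment() advances a struct bvec_iter across the bio and hands back one single-page struct bio_vec per iteration. A minimal sketch of the same pattern (the function name is hypothetical; memzero_bvec() is the real helper shown at line 538):

    #include <linux/bio.h>
    #include <linux/highmem.h>

    /* Zero every payload byte of @bio, one single-page segment at a time. */
    static void zero_bio_payload(struct bio *bio)
    {
            struct bio_vec bv;      /* by-value copy of the current segment */
            struct bvec_iter iter;  /* cursor over bio->bi_io_vec */

            bio_for_each_segment(bv, bio, iter)
                    memzero_bvec(&bv);  /* kmap the page, memset bv.bv_len bytes, kunmap */
    }

Note that bv is a copy, not a pointer into bi_io_vec, so a loop body may clamp or adjust it freely without corrupting the bio, which is exactly what bio_truncate() relies on at line 566.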
D | blk-merge.c |
      199  const struct bio_vec *bv, unsigned *nsegs,  in bvec_split_segs() argument
      204  unsigned len = min(bv->bv_len, max_len);  in bvec_split_segs()
      209  seg_size = get_max_segment_size(q, bv->bv_page,  in bvec_split_segs()
      210  bv->bv_offset + total_len);  in bvec_split_segs()
      217  if ((bv->bv_offset + total_len) & queue_virt_boundary(q))  in bvec_split_segs()
      224  return len > 0 || bv->bv_len > max_len;  in bvec_split_segs()
      251  struct bio_vec bv, bvprv, *bvprvp = NULL;  in blk_bio_segment_split() local
      257  bio_for_each_bvec(bv, bio, iter) {  in blk_bio_segment_split()
      262  if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))  in blk_bio_segment_split()
      266  sectors + (bv.bv_len >> 9) <= max_sectors &&  in blk_bio_segment_split()
      [all …]
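bvec_split_segs() is where a single large bvec gets charged against the queue's segment limits, and line 217 shows the virt-boundary test that forces a split. Roughly the same check, pulled out as a standalone sketch (the function name is hypothetical; the kernel does this inside bvec_gap_to_prev()):

    #include <linux/types.h>
    #include <linux/bvec.h>

    /*
     * With a virt-boundary mask, adjacent segments can only share one
     * scatter/gather entry if the previous one ends on the boundary and
     * the next begins on one; otherwise the DMA engine would see a gap.
     */
    static bool bvecs_leave_gap(const struct bio_vec *prev,
                                unsigned int next_offset,
                                unsigned long virt_boundary_mask)
    {
            if (!virt_boundary_mask)
                    return false;   /* device has no such constraint */
            return ((prev->bv_offset + prev->bv_len) | next_offset) &
                    virt_boundary_mask;
    }

This is why blk_bio_segment_split() (line 262) splits the bio as soon as bvec_gap_to_prev() fires: everything after the gap has to reach the hardware as a separate request.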
D | blk-crypto-fallback.c |
      161  struct bio_vec bv;  in blk_crypto_fallback_clone_bio() local
      176  bio_for_each_segment(bv, bio_src, iter)  in blk_crypto_fallback_clone_bio()
      177  bio->bi_io_vec[bio->bi_vcnt++] = bv;  in blk_crypto_fallback_clone_bio()
      216  struct bio_vec bv;  in blk_crypto_fallback_split_bio_if_needed() local
      219  bio_for_each_segment(bv, bio, iter) {  in blk_crypto_fallback_split_bio_if_needed()
      220  num_sectors += bv.bv_len >> SECTOR_SHIFT;  in blk_crypto_fallback_split_bio_if_needed()
      389  struct bio_vec bv;  in blk_crypto_fallback_decrypt_bio() local
      418  __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {  in blk_crypto_fallback_decrypt_bio()
      419  struct page *page = bv.bv_page;  in blk_crypto_fallback_decrypt_bio()
      421  sg_set_page(&sg, page, data_unit_size, bv.bv_offset);  in blk_crypto_fallback_decrypt_bio()
      [all …]
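Lines 176–177 show how the crypto fallback deep-clones a bio: instead of sharing the source's bvec array the way the fast-clone path does, it copies each segment descriptor into a freshly allocated bio so the pages can later be swapped for encrypted bounce pages. The copy loop in isolation, assuming @clone was already allocated with at least bio_segments(src) vectors (allocation elided because bio_alloc()'s signature has changed across kernel versions; the function name is hypothetical):

    #include <linux/bio.h>

    /* Copy @src's segment descriptors into @clone, one bio_vec at a time. */
    static void clone_bio_segments(struct bio *clone, struct bio *src)
    {
            struct bio_vec bv;
            struct bvec_iter iter;

            bio_for_each_segment(bv, src, iter)
                    clone->bi_io_vec[clone->bi_vcnt++] = bv;  /* struct copy: page + len + offset */
    }

The split path at lines 219–220 uses the same walk only to total bv.bv_len >> SECTOR_SHIFT, i.e. how many 512-byte sectors of payload the bio carries.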
D | blk-crypto.c |
      230  struct bio_vec bv;  in bio_crypt_check_alignment() local
      232  bio_for_each_segment(bv, bio, iter) {  in bio_crypt_check_alignment()
      233  if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))  in bio_crypt_check_alignment()
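bio_crypt_check_alignment() rejects bios whose segments do not line up with the crypto data-unit size, since inline encryption processes data in data_unit_size blocks and derives one IV per block. OR-ing length and offset lets a single IS_ALIGNED() test cover both fields, which is valid because data_unit_size is a power of two. A standalone sketch (name hypothetical):

    #include <linux/bio.h>
    #include <linux/kernel.h>

    /* True iff every segment starts and ends on a data-unit boundary. */
    static bool bio_segments_du_aligned(struct bio *bio, unsigned int data_unit_size)
    {
            struct bio_vec bv;
            struct bvec_iter iter;

            bio_for_each_segment(bv, bio, iter) {
                    /* (bv_len | bv_offset) is aligned iff both fields are */
                    if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
                            return false;
            }
            return true;
    }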
D | bounce.c |
      143  struct bio_vec bv;  in bounce_clone_bio() local
      187  bio_for_each_segment(bv, bio_src, iter)  in bounce_clone_bio()
      188  bio->bi_io_vec[bio->bi_vcnt++] = bv;  in bounce_clone_bio()
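bounce_clone_bio() copies segment descriptors exactly like the crypto fallback above; the interesting part comes afterwards, when payload in highmem pages is copied into lowmem bounce pages. A hedged sketch of flattening a bio's payload with memcpy_from_bvec() (function name hypothetical; the caller must supply a buffer of at least bio->bi_iter.bi_size bytes):

    #include <linux/bio.h>
    #include <linux/highmem.h>

    /* Gather @bio's payload into the flat buffer @buf, segment by segment. */
    static void copy_bio_to_buffer(struct bio *bio, char *buf)
    {
            struct bio_vec bv;
            struct bvec_iter iter;

            bio_for_each_segment(bv, bio, iter) {
                    memcpy_from_bvec(buf, &bv);  /* kmap_local + memcpy of bv.bv_len bytes */
                    buf += bv.bv_len;
            }
    }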
D | bio-integrity.c |
      162  struct bio_vec bv;  in bio_integrity_process() local
      171  __bio_for_each_segment(bv, bio, bviter, *proc_iter) {  in bio_integrity_process()
      172  void *kaddr = bvec_kmap_local(&bv);  in bio_integrity_process()
      175  iter.data_size = bv.bv_len;  in bio_integrity_process()
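bio_integrity_process() needs a kernel virtual address for each segment so the integrity profile's generate/verify hook can checksum the bytes; bvec_kmap_local() (line 172) provides a short-lived mapping that works for highmem pages too. The shape of that loop, with a hypothetical callback standing in for the integrity hook:

    #include <linux/bio.h>
    #include <linux/highmem.h>

    /* Run @fn over each segment's bytes via a temporary kernel mapping. */
    static void for_each_segment_mapped(struct bio *bio,
                                        void (*fn)(void *buf, unsigned int len))
    {
            struct bio_vec bv;
            struct bvec_iter iter;

            bio_for_each_segment(bv, bio, iter) {
                    void *kaddr = bvec_kmap_local(&bv);

                    fn(kaddr, bv.bv_len);
                    kunmap_local(kaddr);  /* local mappings are released LIFO */
            }
    }

The __bio_for_each_segment() variant at lines 171 and 418 differs only in letting the caller supply a starting iterator instead of bio->bi_iter.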
D | blk-map.c |
      485  struct bio_vec bv;  in blk_rq_append_bio() local
      488  bio_for_each_bvec(bv, bio, iter)  in blk_rq_append_bio()
      489  nr_segs += blk_segments(&rq->q->limits, bv.bv_len);  in blk_rq_append_bio()
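blk_rq_append_bio() pre-counts how many hardware segments the bio will contribute before attaching it to the request. Note that it walks bio_for_each_bvec(), which yields whole multi-page bvecs rather than the single-page segments of bio_for_each_segment(); blk_segments() then reports how many segments a bvec of that length splits into under the queue's limits. A simplified count that assumes no per-segment size limit, so every bvec is one segment (helper name hypothetical):

    #include <linux/bio.h>

    /* Rough segment count for @bio: one per multi-page bvec.  The real
     * code above additionally splits long bvecs via blk_segments(). */
    static unsigned int count_bio_bvecs(struct bio *bio)
    {
            struct bio_vec bv;
            struct bvec_iter iter;
            unsigned int nr_segs = 0;

            bio_for_each_bvec(bv, bio, iter)
                    nr_segs++;

            return nr_segs;
    }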
D | blk.h |
       81  void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);