/drivers/nvme/target/ |
D | io-cmd-file.c |
    94 iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count); in nvmet_file_submit_bvec()
    108 if (req->f.bvec != req->inline_bvec) { in nvmet_file_io_done()
    110 kfree(req->f.bvec); in nvmet_file_io_done()
    112 mempool_free(req->f.bvec, req->ns->bvec_pool); in nvmet_file_io_done()
    142 bvec_set_page(&req->f.bvec[bv_cnt], sg_page(sg), sg->length, in nvmet_file_execute_io()
    144 len += req->f.bvec[bv_cnt].bv_len; in nvmet_file_execute_io()
    145 total_len += req->f.bvec[bv_cnt].bv_len; in nvmet_file_execute_io()
    231 req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec), in nvmet_file_execute_rw()
    234 req->f.bvec = req->inline_bvec; in nvmet_file_execute_rw()
    236 if (unlikely(!req->f.bvec)) { in nvmet_file_execute_rw()
    [all …]
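
The pattern above, a scatterlist turned into a bio_vec array that then backs an iov_iter for file I/O, is the core of the file-backed NVMe target. The following is a minimal sketch, not the nvmet code itself; the helper name, the synchronous vfs_iter_read() call (nvmet uses an async kiocb path), and the fixed ITER_DEST direction are simplifications for illustration.

#include <linux/bvec.h>
#include <linux/fs.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uio.h>

static ssize_t read_file_into_sgl(struct file *file, loff_t pos,
				  struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	struct bio_vec *bvec;
	struct iov_iter iter;
	size_t total = 0;
	ssize_t ret;
	int i;

	bvec = kmalloc_array(nents, sizeof(*bvec), GFP_KERNEL);
	if (!bvec)
		return -ENOMEM;

	/* Each scatterlist entry becomes one bio_vec segment. */
	for_each_sg(sgl, sg, nents, i) {
		bvec_set_page(&bvec[i], sg_page(sg), sg->length, sg->offset);
		total += sg->length;
	}

	/* Point an ITER_DEST iterator at the segments and do the read. */
	iov_iter_bvec(&iter, ITER_DEST, bvec, nents, total);
	ret = vfs_iter_read(file, &iter, &pos, 0);

	kfree(bvec);
	return ret;
}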
|
D | tcp.c |
    585 struct bio_vec bvec; in nvmet_try_send_data_pdu() local
    590 bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left); in nvmet_try_send_data_pdu()
    591 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); in nvmet_try_send_data_pdu()
    617 struct bio_vec bvec; in nvmet_try_send_data() local
    625 bvec_set_page(&bvec, page, left, cmd->offset); in nvmet_try_send_data()
    626 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); in nvmet_try_send_data()
    664 struct bio_vec bvec; in nvmet_try_send_response() local
    674 bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left); in nvmet_try_send_response()
    675 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left); in nvmet_try_send_response()
    694 struct bio_vec bvec; in nvmet_try_send_r2t() local
    [all …]
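
These hits show the one-segment socket-send idiom used throughout the TCP targets: a stack bio_vec wraps either a page (bvec_set_page) or a kernel-virtual buffer (bvec_set_virt), and iov_iter_bvec() makes it the message payload. A minimal sketch, assuming an in-kernel socket and a kmalloc'd (lowmem) PDU buffer; the helper name and flags are illustrative, and the real drivers carry per-command offset/length bookkeeping that is omitted here.

#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int send_pdu_bytes(struct socket *sock, void *pdu, size_t left)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct bio_vec bvec;

	/* Wrap the kernel-virtual buffer in a one-entry bvec; pdu must be
	 * kmalloc'd/lowmem, bvec_set_virt() cannot take vmalloc memory. */
	bvec_set_virt(&bvec, pdu, left);

	/* Hand the bvec to the socket as the source of this message. */
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);

	return sock_sendmsg(sock, &msg);
}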
|
/drivers/md/ |
D | dm-user.c |
    297 struct bio_vec bvec; in bio_size() local
    301 bio_for_each_segment (bvec, bio, iter) in bio_size()
    471 struct bio_vec bvec; in bio_copy_from_iter() local
    475 bio_for_each_segment (bvec, bio, biter) { in bio_copy_from_iter()
    478 ret = copy_page_from_iter(bvec.bv_page, bvec.bv_offset, in bio_copy_from_iter()
    479 bvec.bv_len, iter); in bio_copy_from_iter()
    493 if (ret < bvec.bv_len) in bio_copy_from_iter()
    502 struct bio_vec bvec; in bio_copy_to_iter() local
    506 bio_for_each_segment (bvec, bio, biter) { in bio_copy_to_iter()
    509 ret = copy_page_to_iter(bvec.bv_page, bvec.bv_offset, in bio_copy_to_iter()
    [all …]
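
dm-user copies bio payloads to and from an iov_iter with copy_page_to_iter()/copy_page_from_iter(), one segment at a time. Below is a reduced sketch of the "fill a bio from an iterator" direction, assuming the iterator holds at least as many bytes as the bio describes; the function name is made up.

#include <linux/bio.h>
#include <linux/bvec.h>
#include <linux/uio.h>

static int fill_bio_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bvec_iter biter;
	struct bio_vec bvec;

	bio_for_each_segment(bvec, bio, biter) {
		size_t ret;

		/* Copy one segment's worth of bytes into the bio page. */
		ret = copy_page_from_iter(bvec.bv_page, bvec.bv_offset,
					  bvec.bv_len, iter);
		if (ret < bvec.bv_len)
			return -EFAULT;	/* short copy: source exhausted */
	}
	return 0;
}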
|
D | dm-flakey.c |
    345 struct bio_vec bvec; in corrupt_bio_common() local
    351 bio_for_each_segment(bvec, bio, iter) { in corrupt_bio_common()
    353 unsigned char *segment = bvec_kmap_local(&bvec); in corrupt_bio_common()
    460 struct bio_vec bvec = bvec_iter_bvec(bio->bi_io_vec, iter); in clone_bio() local
    461 unsigned this_step = min(bvec.bv_len, to_copy); in clone_bio()
    462 void *map = bvec_kmap_local(&bvec); in clone_bio()
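
dm-flakey maps each segment with bvec_kmap_local() so it can modify bytes in place regardless of whether the page is highmem. A stripped-down sketch of that walk; the single-byte overwrite stands in for the driver's corrupt_bio_common() logic.

#include <linux/bio.h>
#include <linux/bvec.h>
#include <linux/highmem.h>

static void poke_each_segment(struct bio *bio, unsigned char value)
{
	struct bvec_iter iter;
	struct bio_vec bvec;

	bio_for_each_segment(bvec, bio, iter) {
		/* Map the segment (handles highmem pages) and modify it. */
		unsigned char *segment = bvec_kmap_local(&bvec);

		segment[0] = value;
		kunmap_local(segment);
	}
}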
|
D | dm-io.c |
    219 struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr, in bio_get_page() local
    222 *p = bvec.bv_page; in bio_get_page()
    223 *len = bvec.bv_len; in bio_get_page()
    224 *offset = bvec.bv_offset; in bio_get_page()
    227 dp->context_bi.bi_sector = (sector_t)bvec.bv_len; in bio_get_page()
|
/drivers/block/ |
D | n64cart.c |
    89 struct bio_vec bvec; in n64cart_submit_bio() local
    94 bio_for_each_segment(bvec, bio, iter) { in n64cart_submit_bio()
    95 if (!n64cart_do_bvec(dev, &bvec, pos)) { in n64cart_submit_bio()
    99 pos += bvec.bv_len; in n64cart_submit_bio()
|
D | loop.c |
    87 struct bio_vec *bvec; member
    239 static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) in lo_write_bvec() argument
    244 iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len); in lo_write_bvec()
    250 if (likely(bw == bvec->bv_len)) in lo_write_bvec()
    255 (unsigned long long)*ppos, bvec->bv_len); in lo_write_bvec()
    264 struct bio_vec bvec; in lo_write_simple() local
    268 rq_for_each_segment(bvec, rq, iter) { in lo_write_simple()
    269 ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos); in lo_write_simple()
    281 struct bio_vec bvec; in lo_read_simple() local
    286 rq_for_each_segment(bvec, rq, iter) { in lo_read_simple()
    [all …]
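
loop's simple (non-AIO) path turns every request segment into a one-entry bvec iterator and issues a synchronous write to the backing file. A sketch under those assumptions, using vfs_iter_write() in place of loop's lo_write_bvec()/write_iter plumbing; error handling is reduced to short-write detection.

#include <linux/blk-mq.h>
#include <linux/bvec.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/uio.h>

static int write_rq_to_file(struct file *file, struct request *rq, loff_t pos)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter) {
		struct iov_iter i;
		ssize_t bw;

		/* One segment becomes one synchronous write. */
		iov_iter_bvec(&i, ITER_SOURCE, &bvec, 1, bvec.bv_len);
		bw = vfs_iter_write(file, &i, &pos, 0);
		if (bw < 0)
			return bw;
		if (bw != bvec.bv_len)
			return -EIO;	/* short write */
		cond_resched();
	}
	return 0;
}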
|
D | brd.c |
    247 struct bio_vec bvec; in brd_submit_bio() local
    250 bio_for_each_segment(bvec, bio, iter) { in brd_submit_bio()
    251 unsigned int len = bvec.bv_len; in brd_submit_bio()
    255 WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) || in brd_submit_bio()
    258 err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, in brd_submit_bio()
|
D | ps3disk.c |
    85 struct bio_vec bvec; in ps3disk_scatter_gather() local
    87 rq_for_each_segment(bvec, req, iter) { in ps3disk_scatter_gather()
    89 memcpy_from_bvec(dev->bounce_buf + offset, &bvec); in ps3disk_scatter_gather()
    91 memcpy_to_bvec(&bvec, dev->bounce_buf + offset); in ps3disk_scatter_gather()
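
ps3disk bounces every request through a linear driver buffer with memcpy_from_bvec()/memcpy_to_bvec(). A compact sketch of that gather/scatter step; it assumes the bounce buffer is at least as large as the request payload, and the function name is illustrative.

#include <linux/blk-mq.h>
#include <linux/bvec.h>
#include <linux/highmem.h>

static void bounce_request(struct request *req, void *bounce_buf, bool gather)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int offset = 0;

	rq_for_each_segment(bvec, req, iter) {
		if (gather)	/* write path: collect data for the device */
			memcpy_from_bvec(bounce_buf + offset, &bvec);
		else		/* read path: distribute data back to the bio */
			memcpy_to_bvec(&bvec, bounce_buf + offset);
		offset += bvec.bv_len;
	}
}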
|
D | nbd.c |
    673 struct bio_vec bvec; in nbd_send_cmd() local
    675 bio_for_each_segment(bvec, bio, iter) { in nbd_send_cmd()
    676 bool is_last = !next && bio_iter_last(bvec, iter); in nbd_send_cmd()
    680 req, bvec.bv_len); in nbd_send_cmd()
    681 iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len); in nbd_send_cmd()
    818 struct bio_vec bvec; in nbd_handle_reply() local
    821 rq_for_each_segment(bvec, req, iter) { in nbd_handle_reply()
    822 iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len); in nbd_handle_reply()
    841 req, bvec.bv_len); in nbd_handle_reply()
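
nbd receives a reply's payload straight into the pages of the request it completes: each segment becomes the destination of one recvmsg. A sketch of that receive side, assuming a connected in-kernel socket; the real driver also handles partial receives, reconnects and per-command state, which are left out here.

#include <linux/blk-mq.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int recv_into_request(struct socket *sock, struct request *req)
{
	struct msghdr msg = { .msg_flags = MSG_WAITALL };
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, req, iter) {
		int ret;

		/* The segment's page/offset/len is the receive destination. */
		iov_iter_bvec(&msg.msg_iter, ITER_DEST, &bvec, 1, bvec.bv_len);
		ret = sock_recvmsg(sock, &msg, MSG_WAITALL);
		if (ret < 0)
			return ret;
		if (ret != bvec.bv_len)
			return -EIO;	/* short receive */
	}
	return 0;
}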
|
D | ps3vram.c |
    538 struct bio_vec bvec; in ps3vram_do_bio() local
    542 bio_for_each_segment(bvec, bio, iter) { in ps3vram_do_bio()
    544 char *ptr = bvec_virt(&bvec); in ps3vram_do_bio()
    545 size_t len = bvec.bv_len, retlen; in ps3vram_do_bio()
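
ps3vram (and dcssblk below) rely on their bios never containing highmem pages, so bvec_virt() yields a directly usable pointer with no kmap. A sketch of that style of copy loop against a virtually contiguous device buffer; dev_mem, offset and the function name are stand-ins, not driver interfaces.

#include <linux/bio.h>
#include <linux/bvec.h>
#include <linux/string.h>

static void copy_bio_lowmem(struct bio *bio, void *dev_mem, loff_t offset,
			    bool is_write)
{
	struct bvec_iter iter;
	struct bio_vec bvec;

	bio_for_each_segment(bvec, bio, iter) {
		/* Safe only because this driver never sees highmem pages. */
		void *ptr = bvec_virt(&bvec);

		if (is_write)
			memcpy(dev_mem + offset, ptr, bvec.bv_len);
		else
			memcpy(ptr, dev_mem + offset, bvec.bv_len);
		offset += bvec.bv_len;
	}
}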
|
/drivers/target/ |
D | target_core_file.c |
    318 struct bio_vec *bvec; in fd_do_rw() local
    323 bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL); in fd_do_rw()
    324 if (!bvec) { in fd_do_rw()
    330 bvec_set_page(&bvec[i], sg_page(sg), sg->length, sg->offset); in fd_do_rw()
    334 iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len); in fd_do_rw()
    380 kfree(bvec); in fd_do_rw()
    437 struct bio_vec *bvec; in fd_execute_write_same() local
    460 bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL); in fd_execute_write_same()
    461 if (!bvec) in fd_execute_write_same()
    465 bvec_set_page(&bvec[i], sg_page(&cmd->t_data_sg[0]), in fd_execute_write_same()
    [all …]
|
/drivers/vhost/ |
D | vringh.c |
    1110 struct bio_vec *bvec; member
    1156 struct bio_vec *bvec = ivec->iov.bvec; in iotlb_translate() local
    1158 bvec_set_page(&bvec[ret], pfn_to_page(pfn), io_len, in iotlb_translate()
    1183 struct bio_vec bvec[IOTLB_IOV_STRIDE]; in copy_from_iotlb() member
    1207 iov_iter_bvec(&iter, ITER_SOURCE, ivec.iov.bvec, ret, in copy_from_iotlb()
    1229 struct bio_vec bvec[IOTLB_IOV_STRIDE]; in copy_to_iotlb() member
    1253 iov_iter_bvec(&iter, ITER_DEST, ivec.iov.bvec, ret, in copy_to_iotlb()
    1275 struct bio_vec bvec[1]; in getu16_iotlb() member
    1294 void *kaddr = kmap_local_page(ivec.iov.bvec[0].bv_page); in getu16_iotlb()
    1295 void *from = kaddr + ivec.iov.bvec[0].bv_offset; in getu16_iotlb()
    [all …]
|
/drivers/nvdimm/ |
D | pmem.c |
    207 struct bio_vec bvec; in pmem_submit_bio() local
    218 bio_for_each_segment(bvec, bio, iter) { in pmem_submit_bio()
    220 rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset, in pmem_submit_bio()
    221 iter.bi_sector, bvec.bv_len); in pmem_submit_bio()
    223 rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset, in pmem_submit_bio()
    224 iter.bi_sector, bvec.bv_len); in pmem_submit_bio()
|
D | btt.c |
    1446 struct bio_vec bvec; in btt_submit_bio() local
    1456 bio_for_each_segment(bvec, bio, iter) { in btt_submit_bio()
    1457 unsigned int len = bvec.bv_len; in btt_submit_bio()
    1467 err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset, in btt_submit_bio()
|
/drivers/mtd/ |
D | mtd_blkdevs.c |
    50 struct bio_vec bvec; in do_blktrans_request() local
    76 rq_for_each_segment(bvec, req, iter) in do_blktrans_request()
    77 flush_dcache_page(bvec.bv_page); in do_blktrans_request()
    83 rq_for_each_segment(bvec, req, iter) in do_blktrans_request()
    84 flush_dcache_page(bvec.bv_page); in do_blktrans_request()
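
mtd_blkdevs and ubiblock flush the data cache of every page they filled by CPU copy, so user mappings observe the new contents on architectures with aliasing caches. The whole pattern fits in a few lines; the helper name below is illustrative.

#include <linux/blk-mq.h>
#include <linux/bvec.h>
#include <linux/highmem.h>

static void flush_request_pages(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	/* Flush each page the driver just wrote to by CPU copy. */
	rq_for_each_segment(bvec, req, iter)
		flush_dcache_page(bvec.bv_page);
}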
|
/drivers/s390/block/ |
D | dcssblk.c |
    861 struct bio_vec bvec; in dcssblk_submit_bio() local
    892 bio_for_each_segment(bvec, bio, iter) { in dcssblk_submit_bio()
    893 page_addr = bvec_virt(&bvec); in dcssblk_submit_bio()
    896 !IS_ALIGNED(bvec.bv_len, PAGE_SIZE))) in dcssblk_submit_bio()
    900 memcpy(page_addr, __va(source_addr), bvec.bv_len); in dcssblk_submit_bio()
    902 memcpy(__va(source_addr), page_addr, bvec.bv_len); in dcssblk_submit_bio()
    903 bytes_done += bvec.bv_len; in dcssblk_submit_bio()
|
/drivers/block/zram/ |
D | zram_drv.c |
    146 static inline bool is_partial_io(struct bio_vec *bvec) in is_partial_io() argument
    148 return bvec->bv_len != PAGE_SIZE; in is_partial_io()
    152 static inline bool is_partial_io(struct bio_vec *bvec) in is_partial_io() argument
    1389 static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec, in zram_bvec_read_partial() argument
    1399 memcpy_to_bvec(bvec, page_address(page) + offset); in zram_bvec_read_partial()
    1404 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, in zram_bvec_read() argument
    1407 if (is_partial_io(bvec)) in zram_bvec_read()
    1408 return zram_bvec_read_partial(zram, bvec, index, offset); in zram_bvec_read()
    1409 return zram_read_page(zram, bvec->bv_page, index, bio); in zram_bvec_read()
    1541 static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec, in zram_bvec_write_partial()
    [all …]
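
zram distinguishes full-page segments from partial ones (is_partial_io): a partial read goes through a freshly allocated bounce page, and only the requested slice is copied into the caller's bvec with memcpy_to_bvec(). A sketch of that split, with the decompress step abstracted behind a caller-supplied read_full_page callback (hypothetical, not a zram interface).

#include <linux/bvec.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>

static bool is_partial_segment(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

static int read_segment(struct bio_vec *bvec, u32 index, u32 offset,
			int (*read_full_page)(struct page *page, u32 index))
{
	struct page *page;
	int ret;

	/* Full-page segments can be read straight into the bio page. */
	if (!is_partial_segment(bvec))
		return read_full_page(bvec->bv_page, index);

	/* Partial segments go through a bounce page first. */
	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	ret = read_full_page(page, index);
	if (!ret)	/* copy only the requested slice into the bvec */
		memcpy_to_bvec(bvec, page_address(page) + offset);

	__free_page(page);
	return ret;
}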
|
/drivers/block/null_blk/ |
D | main.c |
    1292 struct bio_vec bvec; in null_handle_rq() local
    1295 rq_for_each_segment(bvec, rq, iter) { in null_handle_rq()
    1296 len = min(bvec.bv_len, nullb->dev->max_segment_size); in null_handle_rq()
    1297 bvec.bv_len = len; in null_handle_rq()
    1298 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, in null_handle_rq()
    1319 struct bio_vec bvec; in null_handle_bio() local
    1323 bio_for_each_segment(bvec, bio, iter) { in null_handle_bio()
    1324 len = min(bvec.bv_len, nullb->dev->max_segment_size); in null_handle_bio()
    1325 bvec.bv_len = len; in null_handle_bio()
    1326 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, in null_handle_bio()
|
/drivers/mtd/ubi/ |
D | block.c |
    191 struct bio_vec bvec; in ubiblock_read() local
    222 rq_for_each_segment(bvec, req, iter) in ubiblock_read()
    223 flush_dcache_page(bvec.bv_page); in ubiblock_read()
|
/drivers/block/drbd/ |
D | drbd_main.c |
    1543 struct bio_vec bvec; in _drbd_send_page() local
    1560 bvec_set_page(&bvec, page, len, offset); in _drbd_send_page()
    1561 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in _drbd_send_page()
    1590 struct bio_vec bvec; in _drbd_send_bio() local
    1594 bio_for_each_segment(bvec, bio, iter) { in _drbd_send_bio()
    1597 err = _drbd_no_send_page(peer_device, bvec.bv_page, in _drbd_send_bio()
    1598 bvec.bv_offset, bvec.bv_len, in _drbd_send_bio()
    1599 bio_iter_last(bvec, iter) in _drbd_send_bio()
    1609 struct bio_vec bvec; in _drbd_send_zc_bio() local
    1613 bio_for_each_segment(bvec, bio, iter) { in _drbd_send_zc_bio()
    [all …]
|
D | drbd_worker.c |
    319 struct bio_vec bvec; in drbd_csum_bio() local
    326 bio_for_each_segment(bvec, bio, iter) { in drbd_csum_bio()
    329 src = bvec_kmap_local(&bvec); in drbd_csum_bio()
    330 crypto_shash_update(desc, src, bvec.bv_len); in drbd_csum_bio()
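
drbd_csum_bio() hashes a bio by feeding each segment to an shash transform through bvec_kmap_local(). A condensed sketch, assuming the caller already allocated the tfm (for example with crypto_alloc_shash("crc32c", 0, 0)) and a digest buffer of crypto_shash_digestsize(tfm) bytes; the function name is illustrative.

#include <crypto/hash.h>
#include <linux/bio.h>
#include <linux/bvec.h>
#include <linux/highmem.h>

static int csum_bio(struct crypto_shash *tfm, struct bio *bio, u8 *digest)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	struct bvec_iter iter;
	struct bio_vec bvec;
	int err;

	desc->tfm = tfm;
	err = crypto_shash_init(desc);
	if (err)
		return err;

	/* Feed every mapped segment into the running hash. */
	bio_for_each_segment(bvec, bio, iter) {
		u8 *src = bvec_kmap_local(&bvec);

		err = crypto_shash_update(desc, src, bvec.bv_len);
		kunmap_local(src);
		if (err)
			return err;
	}
	return crypto_shash_final(desc, digest);
}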
|
/drivers/nvme/host/ |
D | tcp.c |
    250 return req->iter.bvec->bv_page; in nvme_tcp_req_cur_page()
    255 return req->iter.bvec->bv_offset + req->iter.iov_offset; in nvme_tcp_req_cur_offset()
    1000 struct bio_vec bvec; in nvme_tcp_try_send_data() local
    1019 bvec_set_page(&bvec, page, len, offset); in nvme_tcp_try_send_data()
    1020 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_data()
    1060 struct bio_vec bvec; in nvme_tcp_try_send_cmd_pdu() local
    1075 bvec_set_virt(&bvec, (void *)pdu + req->offset, len); in nvme_tcp_try_send_cmd_pdu()
    1076 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_cmd_pdu()
    1101 struct bio_vec bvec; in nvme_tcp_try_send_data_pdu() local
    1113 bvec_set_virt(&bvec, (void *)pdu + req->offset, len); in nvme_tcp_try_send_data_pdu()
    [all …]
|
/drivers/infiniband/sw/siw/ |
D | siw_qp_tx.c |
    326 struct bio_vec bvec; in siw_tcp_sendpages() local
    340 bvec_set_page(&bvec, page[i], bytes, offset); in siw_tcp_sendpages()
    341 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); in siw_tcp_sendpages()
|
/drivers/target/iscsi/ |
D | iscsi_target_util.c |
    1132 struct bio_vec bvec; in iscsit_fe_sendpage_sg() local
    1177 bvec_set_page(&bvec, sg_page(sg), sub_len, sg->offset + offset); in iscsit_fe_sendpage_sg()
    1178 iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, sub_len); in iscsit_fe_sendpage_sg()
|