
Searched refs:bvec (Results 1 – 25 of 25) sorted by relevance

/drivers/block/zram/
zram_drv.c:59 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
149 static inline bool is_partial_io(struct bio_vec *bvec) in is_partial_io() argument
151 return bvec->bv_len != PAGE_SIZE; in is_partial_io()
154 static inline bool is_partial_io(struct bio_vec *bvec) in is_partial_io() argument
184 static void update_position(u32 *index, int *offset, struct bio_vec *bvec) in update_position() argument
186 *index += (*offset + bvec->bv_len) / PAGE_SIZE; in update_position()
187 *offset = (*offset + bvec->bv_len) % PAGE_SIZE; in update_position()
585 static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, in read_from_bdev_async() argument
596 if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) { in read_from_bdev_async()
667 struct bio_vec bvec; in writeback_store() local
[all …]
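
The zram hits above show the two small helpers the driver leans on: any bvec shorter than a page counts as partial I/O (forcing a read-modify-write of the compressed page), and a (page index, byte offset) cursor is advanced per bvec with carry. A minimal self-contained sketch of that bookkeeping; the sketch_ prefix marks names invented here, not zram's API:

    #include <linux/bvec.h>
    #include <linux/mm.h>

    /* A bvec that does not cover a whole page forces read-modify-write. */
    static inline bool sketch_is_partial_io(struct bio_vec *bvec)
    {
            return bvec->bv_len != PAGE_SIZE;
    }

    /*
     * Advance a (page index, byte offset) cursor by one bvec, carrying
     * whole-page overflow from the offset into the index.
     */
    static void sketch_update_position(u32 *index, int *offset,
                                       struct bio_vec *bvec)
    {
            *index += (*offset + bvec->bv_len) / PAGE_SIZE;
            *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
    }
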
/drivers/nvme/target/
io-cmd-file.c:101 iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count); in nvmet_file_submit_bvec()
115 if (req->f.bvec != req->inline_bvec) { in nvmet_file_io_done()
117 kfree(req->f.bvec); in nvmet_file_io_done()
119 mempool_free(req->f.bvec, req->ns->bvec_pool); in nvmet_file_io_done()
149 nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg); in nvmet_file_execute_io()
150 len += req->f.bvec[bv_cnt].bv_len; in nvmet_file_execute_io()
151 total_len += req->f.bvec[bv_cnt].bv_len; in nvmet_file_execute_io()
237 req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec), in nvmet_file_execute_rw()
240 req->f.bvec = req->inline_bvec; in nvmet_file_execute_rw()
242 if (unlikely(!req->f.bvec)) { in nvmet_file_execute_rw()
[all …]
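
The nvmet file backend sizes its bio_vec array per request: short transfers reuse a small inline array embedded in the request, longer ones take a heap (or, under pressure, mempool) allocation, and the completion path frees only what was allocated. A hedged sketch of that shape; struct sketch_req, SKETCH_INLINE_BVECS and both helpers are illustrative stand-ins (the driver's own constant is NVMET_MAX_INLINE_BIOVEC), and the mempool branch is omitted:

    #include <linux/bvec.h>
    #include <linux/slab.h>

    #define SKETCH_INLINE_BVECS 8           /* assumed inline capacity */

    struct sketch_req {
            struct bio_vec inline_bvec[SKETCH_INLINE_BVECS];
            struct bio_vec *bvec;
    };

    static int sketch_map_bvecs(struct sketch_req *req, unsigned int nr_bvec)
    {
            if (nr_bvec > SKETCH_INLINE_BVECS)
                    req->bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
                                              GFP_KERNEL);
            else
                    req->bvec = req->inline_bvec;   /* no allocation at all */
            return req->bvec ? 0 : -ENOMEM;
    }

    static void sketch_put_bvecs(struct sketch_req *req)
    {
            /* Only the heap-allocated case owns memory. */
            if (req->bvec != req->inline_bvec)
                    kfree(req->bvec);
    }

Once the array is filled from the scatterlist, a single iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count) hands the whole request to the VFS, exactly as the line-101 hit shows.
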
nvmet.h:332 struct bio_vec *bvec; member
/drivers/target/
target_core_file.c:316 struct bio_vec *bvec; in fd_do_rw() local
321 bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL); in fd_do_rw()
322 if (!bvec) { in fd_do_rw()
328 bvec[i].bv_page = sg_page(sg); in fd_do_rw()
329 bvec[i].bv_len = sg->length; in fd_do_rw()
330 bvec[i].bv_offset = sg->offset; in fd_do_rw()
335 iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len); in fd_do_rw()
379 kfree(bvec); in fd_do_rw()
436 struct bio_vec *bvec; in fd_execute_write_same() local
460 bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL); in fd_execute_write_same()
[all …]
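
fd_do_rw() turns the SCSI scatter-gather list into a temporary bio_vec array so the backing file can be driven through an iov_iter, then frees the array once the transfer is done. A sketch of that conversion, assuming the READ/WRITE-style iov_iter_bvec() signature these hits use; sketch_sgl_to_iter() is an invented name:

    #include <linux/bvec.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>
    #include <linux/uio.h>

    /* Build an iov_iter over the pages of a scatterlist. */
    static int sketch_sgl_to_iter(struct scatterlist *sgl, u32 sgl_nents,
                                  size_t len, bool is_write,
                                  struct iov_iter *iter,
                                  struct bio_vec **bvec_out)
    {
            struct scatterlist *sg;
            struct bio_vec *bvec;
            int i;

            bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
            if (!bvec)
                    return -ENOMEM;

            for_each_sg(sgl, sg, sgl_nents, i) {
                    bvec[i].bv_page = sg_page(sg);
                    bvec[i].bv_len = sg->length;
                    bvec[i].bv_offset = sg->offset;
            }

            iov_iter_bvec(iter, is_write ? WRITE : READ, bvec, sgl_nents, len);
            *bvec_out = bvec;       /* caller kfree()s after the I/O is done */
            return 0;
    }
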
/drivers/block/
loop.c:309 static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) in lo_write_bvec() argument
314 iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len); in lo_write_bvec()
320 if (likely(bw == bvec->bv_len)) in lo_write_bvec()
325 (unsigned long long)*ppos, bvec->bv_len); in lo_write_bvec()
334 struct bio_vec bvec; in lo_write_simple() local
338 rq_for_each_segment(bvec, rq, iter) { in lo_write_simple()
339 ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos); in lo_write_simple()
356 struct bio_vec bvec, b; in lo_write_transfer() local
365 rq_for_each_segment(bvec, rq, iter) { in lo_write_transfer()
366 ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page, in lo_write_transfer()
[all …]
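
lo_write_bvec() is the minimal "one segment through the VFS" pattern: wrap a single bio_vec in a one-entry iov_iter, write it at *ppos, and treat a short write as an error. A sketch under the same era's APIs; only the name is invented:

    #include <linux/bvec.h>
    #include <linux/fs.h>
    #include <linux/uio.h>

    /* Write one segment to a backing file; a short write becomes -EIO. */
    static int sketch_write_bvec(struct file *file, struct bio_vec *bvec,
                                 loff_t *ppos)
    {
            struct iov_iter i;
            ssize_t bw;

            iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);

            file_start_write(file);
            bw = vfs_iter_write(file, &i, ppos, 0);
            file_end_write(file);

            if (likely(bw == bvec->bv_len))
                    return 0;
            return bw >= 0 ? -EIO : bw;     /* short write, or pass the error */
    }

vfs_iter_write() advances *ppos itself, which is why lo_write_simple() above can feed it each segment of the request in turn without recomputing the position.
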
n64cart.c:89 struct bio_vec bvec; in n64cart_submit_bio() local
94 bio_for_each_segment(bvec, bio, iter) { in n64cart_submit_bio()
95 if (!n64cart_do_bvec(dev, &bvec, pos)) in n64cart_submit_bio()
97 pos += bvec.bv_len; in n64cart_submit_bio()
brd.c:287 struct bio_vec bvec; in brd_submit_bio() local
290 bio_for_each_segment(bvec, bio, iter) { in brd_submit_bio()
291 unsigned int len = bvec.bv_len; in brd_submit_bio()
295 WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) || in brd_submit_bio()
298 err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, in brd_submit_bio()
ps3disk.c:85 struct bio_vec bvec; in ps3disk_scatter_gather() local
87 rq_for_each_segment(bvec, req, iter) { in ps3disk_scatter_gather()
89 memcpy_from_bvec(dev->bounce_buf + offset, &bvec); in ps3disk_scatter_gather()
91 memcpy_to_bvec(&bvec, dev->bounce_buf + offset); in ps3disk_scatter_gather()
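
ps3disk moves request data through a contiguous bounce buffer, and memcpy_from_bvec()/memcpy_to_bvec() from <linux/bvec.h> hide the per-segment mapping and offset arithmetic. A sketch of the scatter/gather loop with invented names:

    #include <linux/blkdev.h>
    #include <linux/bvec.h>

    /* Copy an entire request to (gather) or from (scatter) a bounce buffer. */
    static void sketch_scatter_gather(struct request *req, char *bounce_buf,
                                      bool to_buf)
    {
            struct req_iterator iter;
            struct bio_vec bvec;
            unsigned int offset = 0;

            rq_for_each_segment(bvec, req, iter) {
                    if (to_buf)
                            memcpy_from_bvec(bounce_buf + offset, &bvec);
                    else
                            memcpy_to_bvec(&bvec, bounce_buf + offset);
                    offset += bvec.bv_len;
            }
    }
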
nbd.c:642 struct bio_vec bvec; in nbd_send_cmd() local
644 bio_for_each_segment(bvec, bio, iter) { in nbd_send_cmd()
645 bool is_last = !next && bio_iter_last(bvec, iter); in nbd_send_cmd()
649 req, bvec.bv_len); in nbd_send_cmd()
650 iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len); in nbd_send_cmd()
768 struct bio_vec bvec; in nbd_read_stat() local
770 rq_for_each_segment(bvec, req, iter) { in nbd_read_stat()
771 iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len); in nbd_read_stat()
790 req, bvec.bv_len); in nbd_read_stat()
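
nbd streams request payloads over a socket one segment at a time: each bvec becomes a one-vector iov_iter, and bio_iter_last() keeps MSG_MORE set on everything but the final segment so TCP can coalesce. A sketch of the send side; unlike nbd's real sock_xmit() it does not loop on partial sends:

    #include <linux/bio.h>
    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/uio.h>

    /* Send every segment of a bio; `next` means more bios will follow. */
    static int sketch_send_bio(struct socket *sock, struct bio *bio, bool next)
    {
            struct bvec_iter iter;
            struct bio_vec bvec;

            bio_for_each_segment(bvec, bio, iter) {
                    bool is_last = !next && bio_iter_last(bvec, iter);
                    struct msghdr msg = {
                            .msg_flags = is_last ? 0 : MSG_MORE,
                    };
                    int ret;

                    iov_iter_bvec(&msg.msg_iter, WRITE, &bvec, 1, bvec.bv_len);
                    ret = sock_sendmsg(sock, &msg);
                    if (ret < 0)
                            return ret;
            }
            return 0;
    }
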
loop.h:80 struct bio_vec *bvec; member
ps3vram.c:538 struct bio_vec bvec; in ps3vram_do_bio() local
542 bio_for_each_segment(bvec, bio, iter) { in ps3vram_do_bio()
544 char *ptr = bvec_virt(&bvec); in ps3vram_do_bio()
545 size_t len = bvec.bv_len, retlen; in ps3vram_do_bio()
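
ps3vram addresses segment memory directly with bvec_virt(), which is only safe when the bio's pages can never come from highmem. A sketch; the memset is just a stand-in for the driver's real per-segment work:

    #include <linux/bio.h>
    #include <linux/bvec.h>
    #include <linux/string.h>

    /*
     * Touch each segment through its kernel virtual address. Only valid
     * when the bio's pages are guaranteed not to be in highmem.
     */
    static void sketch_fill_bio(struct bio *bio, int c)
    {
            struct bvec_iter iter;
            struct bio_vec bvec;

            bio_for_each_segment(bvec, bio, iter) {
                    char *ptr = bvec_virt(&bvec);

                    memset(ptr, c, bvec.bv_len);
            }
    }
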
/drivers/md/
dm-user.c:297 struct bio_vec bvec; in bio_size() local
301 bio_for_each_segment (bvec, bio, iter) in bio_size()
475 struct bio_vec bvec; in bio_copy_from_iter() local
479 bio_for_each_segment (bvec, bio, biter) { in bio_copy_from_iter()
482 ret = copy_page_from_iter(bvec.bv_page, bvec.bv_offset, in bio_copy_from_iter()
483 bvec.bv_len, iter); in bio_copy_from_iter()
497 if (ret < bvec.bv_len) in bio_copy_from_iter()
506 struct bio_vec bvec; in bio_copy_to_iter() local
510 bio_for_each_segment (bvec, bio, biter) { in bio_copy_to_iter()
513 ret = copy_page_to_iter(bvec.bv_page, bvec.bv_offset, in bio_copy_to_iter()
[all …]
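
dm-user shuttles bio payloads to and from a userspace-backed iov_iter one segment at a time; copy_page_from_iter()/copy_page_to_iter() do the mapping, and a short copy means the other side ran out of data. A sketch of the from-iter direction with an invented name:

    #include <linux/bio.h>
    #include <linux/uio.h>

    /* Fill a bio's segments from an iov_iter; -EFAULT on a short copy. */
    static int sketch_bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
    {
            struct bvec_iter biter;
            struct bio_vec bvec;

            bio_for_each_segment(bvec, bio, biter) {
                    size_t ret;

                    ret = copy_page_from_iter(bvec.bv_page, bvec.bv_offset,
                                              bvec.bv_len, iter);
                    if (ret < bvec.bv_len)
                            return -EFAULT; /* iter exhausted mid-segment */
            }
            return 0;
    }
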
dm-io.c:211 struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr, in bio_get_page() local
214 *p = bvec.bv_page; in bio_get_page()
215 *len = bvec.bv_len; in bio_get_page()
216 *offset = bvec.bv_offset; in bio_get_page()
219 dp->context_bi.bi_sector = (sector_t)bvec.bv_len; in bio_get_page()
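
dm-io drives a raw bio_vec array with its own bvec_iter rather than a bio: bvec_iter_bvec() materializes the current segment from the (array, iter) pair, after which the iterator must be advanced by hand. dm-io stashes its progress in the otherwise unused bi_sector field, as the line-219 hit shows; a more conventional sketch would use bvec_iter_advance():

    #include <linux/bvec.h>

    /*
     * Yield the current page/len/offset of a bvec array and step past it.
     * iter->bi_size must have been initialized to the total byte count.
     */
    static void sketch_next_page(struct bio_vec *bvecs, struct bvec_iter *iter,
                                 struct page **p, unsigned long *len,
                                 unsigned int *offset)
    {
            struct bio_vec bvec = bvec_iter_bvec(bvecs, *iter);

            *p = bvec.bv_page;
            *len = bvec.bv_len;
            *offset = bvec.bv_offset;

            bvec_iter_advance(bvecs, iter, bvec.bv_len);
    }
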
dm-flakey.c:293 struct bio_vec bvec; in corrupt_bio_data() local
302 bio_for_each_segment(bvec, bio, iter) { in corrupt_bio_data()
308 segment = bvec_kmap_local(&bvec); in corrupt_bio_data()
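
dm-flakey's corruption path maps exactly one segment at a time with bvec_kmap_local(), which must be paired with kunmap_local() before moving on. A sketch that overwrites one byte at a given offset into a bio's data; the name and single-byte behaviour are illustrative:

    #include <linux/bio.h>
    #include <linux/highmem.h>

    /* Overwrite the byte at `corrupt_at` bytes into the bio's payload. */
    static void sketch_corrupt_bio(struct bio *bio, unsigned int corrupt_at,
                                   unsigned char value)
    {
            struct bvec_iter iter;
            struct bio_vec bvec;
            unsigned int done = 0;

            bio_for_each_segment(bvec, bio, iter) {
                    if (corrupt_at < done + bvec.bv_len) {
                            char *segment = bvec_kmap_local(&bvec);

                            segment[corrupt_at - done] = value;
                            kunmap_local(segment);
                            return;
                    }
                    done += bvec.bv_len;
            }
    }
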
md.c:1012 struct bio_vec bvec; in sync_page_io() local
1014 bio_init(&bio, &bvec, 1); in sync_page_io()
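
sync_page_io() shows the zero-allocation trick for small synchronous I/O: bio_init() with a caller-provided single-entry bio_vec table puts the whole bio on the stack. A sketch against the pre-5.18 three-argument bio_init() seen in the hit; the function name is invented:

    #include <linux/bio.h>

    /* Read one page synchronously using an on-stack bio and inline bvec. */
    static int sketch_sync_page_read(struct block_device *bdev,
                                     sector_t sector, struct page *page)
    {
            struct bio bio;
            struct bio_vec bvec;

            bio_init(&bio, &bvec, 1);       /* table of exactly one vector */
            bio_set_dev(&bio, bdev);
            bio.bi_iter.bi_sector = sector;
            bio.bi_opf = REQ_OP_READ;
            bio_add_page(&bio, page, PAGE_SIZE, 0); /* cannot fail: 1 slot */

            return submit_bio_wait(&bio);
    }
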
/drivers/nvdimm/
blk.c:171 struct bio_vec bvec; in nd_blk_submit_bio() local
183 bio_for_each_segment(bvec, bio, iter) { in nd_blk_submit_bio()
184 unsigned int len = bvec.bv_len; in nd_blk_submit_bio()
187 err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len, in nd_blk_submit_bio()
188 bvec.bv_offset, rw, iter.bi_sector); in nd_blk_submit_bio()
pmem.c:199 struct bio_vec bvec; in pmem_submit_bio() local
210 bio_for_each_segment(bvec, bio, iter) { in pmem_submit_bio()
212 rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset, in pmem_submit_bio()
213 iter.bi_sector, bvec.bv_len); in pmem_submit_bio()
215 rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset, in pmem_submit_bio()
216 iter.bi_sector, bvec.bv_len); in pmem_submit_bio()
btt.c:1449 struct bio_vec bvec; in btt_submit_bio() local
1459 bio_for_each_segment(bvec, bio, iter) { in btt_submit_bio()
1460 unsigned int len = bvec.bv_len; in btt_submit_bio()
1470 err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset, in btt_submit_bio()
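
All three nvdimm front ends (blk, pmem, btt) share one shape: walk the bio with bio_for_each_segment(), hand each segment to a do-one-bvec helper, and let iter.bi_sector track the advancing device offset for free. A sketch of that submit path; sketch_do_bvec() is a stub standing in for nsblk_do_bvec()/pmem_do_read()/btt_do_bvec():

    #include <linux/bio.h>

    /* Stand-in for the real per-segment device transfer. */
    static int sketch_do_bvec(struct page *page, unsigned int len,
                              unsigned int off, bool is_write, sector_t sector)
    {
            return 0;       /* pretend the transfer succeeded */
    }

    static void sketch_submit_bio(struct bio *bio)
    {
            bool is_write = op_is_write(bio_op(bio));
            struct bvec_iter iter;
            struct bio_vec bvec;
            int err;

            bio_for_each_segment(bvec, bio, iter) {
                    /* iter.bi_sector already points at this segment's LBA. */
                    err = sketch_do_bvec(bvec.bv_page, bvec.bv_len,
                                         bvec.bv_offset, is_write,
                                         iter.bi_sector);
                    if (err) {
                            bio->bi_status = errno_to_blk_status(err);
                            break;
                    }
            }
            bio_endio(bio);
    }
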
/drivers/s390/block/
dcssblk.c:861 struct bio_vec bvec; in dcssblk_submit_bio() local
896 bio_for_each_segment(bvec, bio, iter) { in dcssblk_submit_bio()
897 page_addr = (unsigned long)bvec_virt(&bvec); in dcssblk_submit_bio()
899 if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) in dcssblk_submit_bio()
904 bvec.bv_len); in dcssblk_submit_bio()
907 bvec.bv_len); in dcssblk_submit_bio()
909 bytes_done += bvec.bv_len; in dcssblk_submit_bio()
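
dcssblk is backed by a directly mapped memory segment, so before memcpy()ing it rejects any segment that is not page-aligned in both address and length (the raw `& 4095` in the hit). A sketch of that validation with an invented name:

    #include <linux/bio.h>
    #include <linux/bvec.h>

    /* Require every segment to be page-aligned in address and length. */
    static int sketch_check_alignment(struct bio *bio)
    {
            struct bvec_iter iter;
            struct bio_vec bvec;

            bio_for_each_segment(bvec, bio, iter) {
                    unsigned long addr = (unsigned long)bvec_virt(&bvec);

                    if ((addr & (PAGE_SIZE - 1)) ||
                        (bvec.bv_len & (PAGE_SIZE - 1)))
                            return -EINVAL;
            }
            return 0;
    }
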
/drivers/block/null_blk/
main.c:1169 struct bio_vec bvec; in null_handle_rq() local
1172 rq_for_each_segment(bvec, rq, iter) { in null_handle_rq()
1173 len = bvec.bv_len; in null_handle_rq()
1174 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, in null_handle_rq()
1195 struct bio_vec bvec; in null_handle_bio() local
1199 bio_for_each_segment(bvec, bio, iter) { in null_handle_bio()
1200 len = bvec.bv_len; in null_handle_bio()
1201 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, in null_handle_bio()
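
null_blk is a compact illustration of the two iteration idioms living side by side: rq_for_each_segment() for the blk-mq request path and bio_for_each_segment() for the bio-based path, both feeding the same per-segment transfer helper. A sketch of that symmetry; sketch_transfer() is a stub, and the real null_transfer() also carries direction and sector:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int sketch_transfer(struct page *page, unsigned int len,
                               unsigned int off)
    {
            return 0;       /* stand-in for the real per-segment copy */
    }

    /* blk-mq request path: iterate over every bio in the request. */
    static int sketch_handle_rq(struct request *rq)
    {
            struct req_iterator iter;
            struct bio_vec bvec;
            int err;

            rq_for_each_segment(bvec, rq, iter) {
                    err = sketch_transfer(bvec.bv_page, bvec.bv_len,
                                          bvec.bv_offset);
                    if (err)
                            return err;
            }
            return 0;
    }

    /* bio path: same loop body, narrower iterator. */
    static int sketch_handle_bio(struct bio *bio)
    {
            struct bvec_iter iter;
            struct bio_vec bvec;
            int err;

            bio_for_each_segment(bvec, bio, iter) {
                    err = sketch_transfer(bvec.bv_page, bvec.bv_len,
                                          bvec.bv_offset);
                    if (err)
                            return err;
            }
            return 0;
    }
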
/drivers/block/rsxx/
dma.c:670 struct bio_vec bvec; in rsxx_dma_queue_bio() local
709 bio_for_each_segment(bvec, bio, iter) { in rsxx_dma_queue_bio()
710 bv_len = bvec.bv_len; in rsxx_dma_queue_bio()
711 bv_off = bvec.bv_offset; in rsxx_dma_queue_bio()
723 laddr, bvec.bv_page, in rsxx_dma_queue_bio()
/drivers/block/drbd/
drbd_main.c:1583 struct bio_vec bvec; in _drbd_send_bio() local
1587 bio_for_each_segment(bvec, bio, iter) { in _drbd_send_bio()
1590 err = _drbd_no_send_page(peer_device, bvec.bv_page, in _drbd_send_bio()
1591 bvec.bv_offset, bvec.bv_len, in _drbd_send_bio()
1592 bio_iter_last(bvec, iter) in _drbd_send_bio()
1605 struct bio_vec bvec; in _drbd_send_zc_bio() local
1609 bio_for_each_segment(bvec, bio, iter) { in _drbd_send_zc_bio()
1612 err = _drbd_send_page(peer_device, bvec.bv_page, in _drbd_send_zc_bio()
1613 bvec.bv_offset, bvec.bv_len, in _drbd_send_zc_bio()
1614 bio_iter_last(bvec, iter) ? 0 : MSG_MORE); in _drbd_send_zc_bio()
drbd_worker.c:319 struct bio_vec bvec; in drbd_csum_bio() local
326 bio_for_each_segment(bvec, bio, iter) { in drbd_csum_bio()
329 src = kmap_atomic(bvec.bv_page); in drbd_csum_bio()
330 crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len); in drbd_csum_bio()
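
drbd_csum_bio() hashes a bio's payload by mapping each segment with kmap_atomic() and folding it into crypto_shash_update(). A sketch assuming the shash_desc was already set up with crypto_shash_init(); only the function name is invented:

    #include <crypto/hash.h>
    #include <linux/bio.h>
    #include <linux/highmem.h>

    /* Fold every data segment of a bio into an ongoing shash computation. */
    static int sketch_csum_bio(struct shash_desc *desc, struct bio *bio)
    {
            struct bvec_iter iter;
            struct bio_vec bvec;
            int err = 0;

            bio_for_each_segment(bvec, bio, iter) {
                    u8 *src = kmap_atomic(bvec.bv_page);

                    err = crypto_shash_update(desc, src + bvec.bv_offset,
                                              bvec.bv_len);
                    kunmap_atomic(src);
                    if (err)
                            break;
            }
            return err;
    }
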
drbd_receiver.c:2012 struct bio_vec bvec; in recv_dless_read() local
2035 bio_for_each_segment(bvec, bio, iter) { in recv_dless_read()
2036 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset; in recv_dless_read()
2037 expect = min_t(int, data_size, bvec.bv_len); in recv_dless_read()
2039 kunmap(bvec.bv_page); in recv_dless_read()
/drivers/nvme/host/
tcp.c:233 return req->iter.bvec->bv_page; in nvme_tcp_req_cur_page()
238 return req->iter.bvec->bv_offset + req->iter.iov_offset; in nvme_tcp_req_cur_offset()
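
nvme-tcp needs the page and intra-page offset currently in flight, so it peeks inside its bvec-backed iov_iter: iter.bvec points at the current vector and iter.iov_offset is the progress within it. A sketch of the two accessors; valid only while iov_iter_is_bvec() holds, and the names are invented:

    #include <linux/bvec.h>
    #include <linux/uio.h>

    /* Page currently being transferred by a bvec-backed iov_iter. */
    static inline struct page *sketch_cur_page(const struct iov_iter *iter)
    {
            return iter->bvec->bv_page;
    }

    /* Offset into that page: the vector's base plus progress so far. */
    static inline size_t sketch_cur_offset(const struct iov_iter *iter)
    {
            return iter->bvec->bv_offset + iter->iov_offset;
    }
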