
Lines matching refs: bio (block/blk-map.c)

46 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)  in bio_copy_from_iter()  argument
51 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_from_iter()
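
Only the signature (line 46) and the per-segment loop (line 51) match above. For context, a hedged sketch of the whole helper, roughly as it reads in kernels of this era (exact body varies by version): walk every segment of the bio and fill each bounce page from the user iov_iter, failing with -EFAULT on a short copy.

/* Sketch of bio_copy_from_iter(); approximate, version-dependent. */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                ssize_t ret;

                /* copy_page_from_iter() advances *iter as it copies */
                ret = copy_page_from_iter(bvec->bv_page, bvec->bv_offset,
                                          bvec->bv_len, iter);

                if (!iov_iter_count(iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT; /* short copy from user space */
        }

        return 0;
}
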
77 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument
82 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_to_iter()
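
Note that line 77 takes the struct iov_iter by value, unlike bio_copy_from_iter() above. That is deliberate: bio_uncopy_user() (line 121) replays the iterator saved at map time, and advancing a private copy leaves the stored position intact. A hedged sketch of the body, mirroring the previous helper:

/* Sketch of bio_copy_to_iter(); the iterator is a by-value copy, so
 * advancing it here does not disturb the caller's saved iterator. */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                ssize_t ret;

                ret = copy_page_to_iter(bvec->bv_page, bvec->bv_offset,
                                        bvec->bv_len, &iter);

                if (!iov_iter_count(&iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT; /* short copy to user space */
        }

        return 0;
}
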
107 static int bio_uncopy_user(struct bio *bio) in bio_uncopy_user() argument
109 struct bio_map_data *bmd = bio->bi_private; in bio_uncopy_user()
120 else if (bio_data_dir(bio) == READ) in bio_uncopy_user()
121 ret = bio_copy_to_iter(bio, bmd->iter); in bio_uncopy_user()
123 bio_free_pages(bio); in bio_uncopy_user()
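
bio_uncopy_user() is the teardown half of the bounce path: for a READ it copies the bounce pages back to user space through the iov_iter saved in the bio_map_data, then frees the pages. A simplified sketch; the real function also guards against null-mapped requests and caller-owned pages, which the matches do not show:

/* Simplified sketch of bio_uncopy_user(); null-mapped and
 * caller-supplied-page guards omitted. */
static int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        int ret = 0;

        if (!current->mm)
                ret = -EINTR;   /* orphaned request: no mm to copy into */
        else if (bio_data_dir(bio) == READ)
                ret = bio_copy_to_iter(bio, bmd->iter);

        bio_free_pages(bio);    /* drop the bounce pages */
        kfree(bmd);
        return ret;
}
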
134 struct bio *bio; in bio_copy_user_iov() local
155 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_user_iov()
156 if (!bio) in bio_copy_user_iov()
158 bio->bi_opf |= req_op(rq); in bio_copy_user_iov()
190 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
201 map_data->offset += bio->bi_iter.bi_size; in bio_copy_user_iov()
209 ret = bio_copy_from_iter(bio, iter); in bio_copy_user_iov()
214 zero_fill_bio(bio); in bio_copy_user_iov()
215 iov_iter_advance(iter, bio->bi_iter.bi_size); in bio_copy_user_iov()
218 bio->bi_private = bmd; in bio_copy_user_iov()
220 ret = blk_rq_append_bio(rq, bio); in bio_copy_user_iov()
226 bio_free_pages(bio); in bio_copy_user_iov()
227 bio_put(bio); in bio_copy_user_iov()
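
Piecing the matches together, bio_copy_user_iov() builds the bounce mapping: allocate pages and add them with bio_add_pc_page() (line 190), pre-fill them from the iterator for writes or zero-fill for reads (lines 209-215), stash the bio_map_data in bi_private for bio_uncopy_user() (line 218), and append the bio to the request (line 220). A condensed sketch with the rq_map_data (caller-supplied pages) path and sub-page offset handling left out:

/* Condensed sketch of bio_copy_user_iov(); hedged, version-dependent.
 * bio_alloc_map_data() is the local helper in this file that kmallocs
 * the bmd and saves a copy of *iter in it. */
static int bio_copy_user_iov_sketch(struct request *rq,
                struct iov_iter *iter, gfp_t gfp_mask)
{
        unsigned int len = iov_iter_count(iter);
        int nr_pages = DIV_ROUND_UP(len, PAGE_SIZE);
        struct bio_map_data *bmd;
        struct bio *bio;
        int ret = -ENOMEM;

        bmd = bio_alloc_map_data(iter, gfp_mask);
        if (!bmd)
                return -ENOMEM;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                goto out_bmd;
        bio->bi_opf |= req_op(rq);

        while (len) {
                unsigned int bytes = min_t(unsigned int, len, PAGE_SIZE);
                struct page *page = alloc_page(GFP_NOIO | gfp_mask);

                if (!page)
                        goto cleanup;
                if (bio_add_pc_page(rq->q, bio, page, bytes, 0) < bytes) {
                        __free_page(page);
                        break;  /* bio full; caller loops for the rest */
                }
                len -= bytes;
        }

        if (iov_iter_rw(iter) == WRITE) {
                ret = bio_copy_from_iter(bio, iter);    /* fill bounce pages */
                if (ret)
                        goto cleanup;
        } else {
                zero_fill_bio(bio);     /* never leak stale kernel memory */
                iov_iter_advance(iter, bio->bi_iter.bi_size);
        }

        bio->bi_private = bmd;  /* found again by bio_uncopy_user() */
        ret = blk_rq_append_bio(rq, bio);
        if (ret)
                goto cleanup;
        return 0;

cleanup:
        bio_free_pages(bio);
        bio_put(bio);
out_bmd:
        kfree(bmd);
        return ret;
}
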
237 struct bio *bio; in bio_map_user_iov() local
244 bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS)); in bio_map_user_iov()
245 if (!bio) in bio_map_user_iov()
247 bio->bi_opf |= req_op(rq); in bio_map_user_iov()
275 if (!bio_add_hw_page(rq->q, bio, page, n, offs, in bio_map_user_iov()
299 ret = blk_rq_append_bio(rq, bio); in bio_map_user_iov()
305 bio_release_pages(bio, false); in bio_map_user_iov()
306 bio_put(bio); in bio_map_user_iov()
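
bio_map_user_iov() is the zero-copy alternative: rather than bouncing, it pins the user pages and attaches them directly with bio_add_hw_page() (line 275); on failure the pins are dropped again with bio_release_pages() (line 305). A condensed sketch of the pin-and-add loop, with the DMA-alignment check and some partial-add bookkeeping trimmed (iov_iter_get_pages_alloc() as in kernels of this era):

/* Condensed core of bio_map_user_iov(); hedged. 'max_sectors' is the
 * hardware limit computed earlier from the queue. */
while (iov_iter_count(iter)) {
        struct page **pages;
        size_t offs, added = 0;
        ssize_t bytes;
        int npages, j;

        bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
        if (bytes <= 0)
                goto out_unmap;
        npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

        for (j = 0; j < npages; j++) {
                unsigned int n = min_t(unsigned int, bytes,
                                       PAGE_SIZE - offs);
                bool same_page = false;

                if (!bio_add_hw_page(rq->q, bio, pages[j], n, offs,
                                     max_sectors, &same_page)) {
                        if (same_page)
                                put_page(pages[j]);
                        break;
                }
                added += n;
                bytes -= n;
                offs = 0;
        }
        iov_iter_advance(iter, added);

        while (j < npages)
                put_page(pages[j++]);   /* unpin pages that didn't fit */
        kvfree(pages);
        if (bytes)
                break;                  /* bio is full */
}
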
310 static void bio_invalidate_vmalloc_pages(struct bio *bio) in bio_invalidate_vmalloc_pages() argument
313 if (bio->bi_private && !op_is_write(bio_op(bio))) { in bio_invalidate_vmalloc_pages()
316 for (i = 0; i < bio->bi_vcnt; i++) in bio_invalidate_vmalloc_pages()
317 len += bio->bi_io_vec[i].bv_len; in bio_invalidate_vmalloc_pages()
318 invalidate_kernel_vmap_range(bio->bi_private, len); in bio_invalidate_vmalloc_pages()
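
This helper exists because vmalloc memory is reached through a kernel virtual alias: on architectures with aliasing data caches, that alias must be invalidated after the device has written into the pages (i.e. on reads), or the CPU may keep serving stale cache lines. Lines 313-318 show nearly the whole body; reconstructed (in the real file the body is compiled only on architectures that implement the vmap flush/invalidate hooks):

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
        if (bio->bi_private && !op_is_write(bio_op(bio))) {
                unsigned long i, len = 0;

                /* bi_private holds the original vmalloc address,
                 * stored by bio_map_kern() at line 357 */
                for (i = 0; i < bio->bi_vcnt; i++)
                        len += bio->bi_io_vec[i].bv_len;
                invalidate_kernel_vmap_range(bio->bi_private, len);
        }
}
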
323 static void bio_map_kern_endio(struct bio *bio) in bio_map_kern_endio() argument
325 bio_invalidate_vmalloc_pages(bio); in bio_map_kern_endio()
326 bio_put(bio); in bio_map_kern_endio()
339 static struct bio *bio_map_kern(struct request_queue *q, void *data, in bio_map_kern()
349 struct bio *bio; in bio_map_kern() local
351 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_map_kern()
352 if (!bio) in bio_map_kern()
357 bio->bi_private = data; in bio_map_kern()
374 if (bio_add_pc_page(q, bio, page, bytes, in bio_map_kern()
377 bio_put(bio); in bio_map_kern()
386 bio->bi_end_io = bio_map_kern_endio; in bio_map_kern()
387 return bio; in bio_map_kern()
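
bio_map_kern() maps a kernel buffer zero-copy. It has to distinguish vmalloc memory from the linear map: vmalloc pages are looked up with vmalloc_to_page(), and the range is flushed up front, with bi_private recording the address for the invalidation above. A condensed, hedged sketch:

/* Condensed sketch of bio_map_kern(); version-dependent. */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
                unsigned int len, gfp_t gfp_mask)
{
        bool is_vmalloc = is_vmalloc_addr(data);
        int nr_pages = DIV_ROUND_UP(offset_in_page(data) + len, PAGE_SIZE);
        unsigned int offset = offset_in_page(data);
        struct bio *bio;
        int i;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        if (is_vmalloc) {
                flush_kernel_vmap_range(data, len);
                bio->bi_private = data; /* for the invalidate on read */
        }

        for (i = 0; i < nr_pages && len; i++) {
                unsigned int bytes = min_t(unsigned int, len,
                                           PAGE_SIZE - offset);
                struct page *page = is_vmalloc ? vmalloc_to_page(data)
                                               : virt_to_page(data);

                if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
                        bio_put(bio);   /* partial mappings unsupported */
                        return ERR_PTR(-EINVAL);
                }
                data += bytes;
                len -= bytes;
                offset = 0;
        }

        bio->bi_end_io = bio_map_kern_endio;
        return bio;
}
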
390 static void bio_copy_kern_endio(struct bio *bio) in bio_copy_kern_endio() argument
392 bio_free_pages(bio); in bio_copy_kern_endio()
393 bio_put(bio); in bio_copy_kern_endio()
396 static void bio_copy_kern_endio_read(struct bio *bio) in bio_copy_kern_endio_read() argument
398 char *p = bio->bi_private; in bio_copy_kern_endio_read()
402 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_kern_endio_read()
407 bio_copy_kern_endio(bio); in bio_copy_kern_endio_read()
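
For reads through the copying kernel path, completion must move the data from the bounce pages back into the caller's buffer (saved in bi_private at line 465) before the pages go away. The loop header is visible at line 402; reconstructed:

static void bio_copy_kern_endio_read(struct bio *bio)
{
        char *p = bio->bi_private;      /* original kernel buffer */
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
                p += bvec->bv_len;
        }

        bio_copy_kern_endio(bio);       /* frees the pages, puts the bio */
}
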
421 static struct bio *bio_copy_kern(struct request_queue *q, void *data, in bio_copy_kern()
427 struct bio *bio; in bio_copy_kern() local
438 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_kern()
439 if (!bio) in bio_copy_kern()
456 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) in bio_copy_kern()
464 bio->bi_end_io = bio_copy_kern_endio_read; in bio_copy_kern()
465 bio->bi_private = data; in bio_copy_kern()
467 bio->bi_end_io = bio_copy_kern_endio; in bio_copy_kern()
470 return bio; in bio_copy_kern()
473 bio_free_pages(bio); in bio_copy_kern()
474 bio_put(bio); in bio_copy_kern()
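
bio_copy_kern() is the bounce path for kernel buffers, used when zero-copy mapping is not safe (see the alignment check in blk_rq_map_kern() below): it allocates fresh pages, copies the data in up front for writes, and installs whichever completion handler matches the direction (lines 464-468). A condensed sketch:

/* Condensed sketch of bio_copy_kern(); hedged, version-dependent. */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
                unsigned int len, gfp_t gfp_mask, int reading)
{
        int nr_pages = DIV_ROUND_UP(offset_in_page(data) + len, PAGE_SIZE);
        void *p = data;
        struct bio *bio;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        while (len) {
                unsigned int bytes = min_t(unsigned int, len, PAGE_SIZE);
                struct page *page = alloc_page(GFP_NOIO | gfp_mask);

                if (!page)
                        goto cleanup;
                if (!reading)   /* writes: fill the bounce page now */
                        memcpy(page_address(page), p, bytes);
                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
                        goto cleanup;

                len -= bytes;
                p += bytes;
        }

        if (reading) {
                bio->bi_end_io = bio_copy_kern_endio_read;
                bio->bi_private = data; /* copy-back target on completion */
        } else {
                bio->bi_end_io = bio_copy_kern_endio;
        }
        return bio;

cleanup:
        bio_free_pages(bio);
        bio_put(bio);
        return ERR_PTR(-ENOMEM);
}
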
482 int blk_rq_append_bio(struct request *rq, struct bio *bio) in blk_rq_append_bio() argument
488 bio_for_each_bvec(bv, bio, iter) in blk_rq_append_bio()
491 if (!rq->bio) { in blk_rq_append_bio()
492 blk_rq_bio_prep(rq, bio, nr_segs); in blk_rq_append_bio()
494 if (!ll_back_merge_fn(rq, bio, nr_segs)) in blk_rq_append_bio()
496 rq->biotail->bi_next = bio; in blk_rq_append_bio()
497 rq->biotail = bio; in blk_rq_append_bio()
498 rq->__data_len += (bio)->bi_iter.bi_size; in blk_rq_append_bio()
499 bio_crypt_free_ctx(bio); in blk_rq_append_bio()
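
blk_rq_append_bio() is the common tail of every mapping path above: count the bio's physical segments, then either make the bio the request's first via blk_rq_bio_prep() or back-merge it onto the chain once ll_back_merge_fn() confirms it fits the queue limits. Nearly every line matches, so the reconstruction below is close to verbatim:

int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        unsigned int nr_segs = 0;

        /* count physical segments for the merge accounting */
        bio_for_each_bvec(bv, bio, iter)
                nr_segs++;

        if (!rq->bio) {
                blk_rq_bio_prep(rq, bio, nr_segs);
        } else {
                if (!ll_back_merge_fn(rq, bio, nr_segs))
                        return -EINVAL;
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
                rq->__data_len += (bio)->bi_iter.bi_size;
                bio_crypt_free_ctx(bio);
        }

        return 0;
}
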
527 struct bio *bio = NULL; in blk_rq_map_user_iov() local
551 if (!bio) in blk_rq_map_user_iov()
552 bio = rq->bio; in blk_rq_map_user_iov()
558 blk_rq_unmap_user(bio); in blk_rq_map_user_iov()
560 rq->bio = NULL; in blk_rq_map_user_iov()
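
blk_rq_map_user_iov() picks between the two user-space paths and loops, since one bio may not cover the whole iterator. The matched lines 551-560 are its unwind bookkeeping: remember the first mapped bio so that, if a later iteration fails, everything mapped so far can be undone with blk_rq_unmap_user() before rq->bio is detached. A condensed sketch of that loop ('i' is the local copy of the caller's iterator; 'copy' is decided earlier from alignment checks and rq_map_data):

/* Condensed core of blk_rq_map_user_iov(); hedged. */
do {
        if (copy)
                ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
        else
                ret = bio_map_user_iov(rq, &i, gfp_mask);
        if (ret)
                goto unmap_rq;
        if (!bio)
                bio = rq->bio;  /* first bio: handle for the unwind */
} while (iov_iter_count(&i));

return 0;

unmap_rq:
        blk_rq_unmap_user(bio);
fail:
        rq->bio = NULL;
        return ret;
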
589 int blk_rq_unmap_user(struct bio *bio) in blk_rq_unmap_user() argument
591 struct bio *next_bio; in blk_rq_unmap_user()
594 while (bio) { in blk_rq_unmap_user()
595 if (bio->bi_private) { in blk_rq_unmap_user()
596 ret2 = bio_uncopy_user(bio); in blk_rq_unmap_user()
600 bio_release_pages(bio, bio_data_dir(bio) == READ); in blk_rq_unmap_user()
603 next_bio = bio; in blk_rq_unmap_user()
604 bio = bio->bi_next; in blk_rq_unmap_user()
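
Teardown mirrors the two mapping flavors and tells them apart by bi_private: bounce bios carry their bio_map_data there and go through bio_uncopy_user(); pinned bios have a NULL bi_private and simply drop their page references, dirtying the pages when the device wrote into them (a READ). Reconstructed from the matches:

int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *next_bio;
        int ret = 0, ret2;

        while (bio) {
                if (bio->bi_private) {          /* bounce path */
                        ret2 = bio_uncopy_user(bio);
                        if (ret2 && !ret)
                                ret = ret2;     /* keep the first error */
                } else {                        /* pinned user pages */
                        bio_release_pages(bio, bio_data_dir(bio) == READ);
                }

                next_bio = bio;
                bio = bio->bi_next;
                bio_put(next_bio);
        }

        return ret;
}
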
630 struct bio *bio; in blk_rq_map_kern() local
640 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); in blk_rq_map_kern()
642 bio = bio_map_kern(q, kbuf, len, gfp_mask); in blk_rq_map_kern()
644 if (IS_ERR(bio)) in blk_rq_map_kern()
645 return PTR_ERR(bio); in blk_rq_map_kern()
647 bio->bi_opf &= ~REQ_OP_MASK; in blk_rq_map_kern()
648 bio->bi_opf |= req_op(rq); in blk_rq_map_kern()
650 ret = blk_rq_append_bio(rq, bio); in blk_rq_map_kern()
652 bio_put(bio); in blk_rq_map_kern()
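
blk_rq_map_kern() chooses bio_copy_kern() when the buffer needs bouncing (for example when it fails the queue's alignment check) and bio_map_kern() otherwise, then forces the bio's op bits to match the request (lines 647-648). For orientation, a hypothetical caller sketched roughly against the v5.13-era passthrough API; submit_kern_cmd and its error handling are illustrative, not from this file, and the blk_get_request()/blk_execute_rq() signatures vary across versions:

/* Hypothetical example: issue a driver-private command backed by a
 * kernel buffer. Helper name and flow are illustrative only. */
static int submit_kern_cmd(struct request_queue *q, void *buf,
                           unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (ret)
                goto out;

        /* v5.13-era form: waits synchronously for completion */
        blk_execute_rq(NULL, rq, 0);
out:
        blk_put_request(rq);
        return ret;
}
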