Lines matching refs:bio (from include/linux/bio.h)

#define bio_prio(bio)                   (bio)->bi_ioprio
#define bio_set_prio(bio, prio)         ((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter) \
        bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter) \
        bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter) \
        bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter) \
        bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)           bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)         bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)          bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_sectors(bio)        bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)     bvec_iter_end_sector((bio)->bi_iter)

/* Return the data direction, READ or WRITE */
#define bio_data_dir(bio) \
        (op_is_write(bio_op(bio)) ? WRITE : READ)

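These accessors are what generic code uses to inspect a bio without reaching into its internals. A minimal sketch (the helper name is mine, not from the header):

#include <linux/bio.h>
#include <linux/printk.h>

/* Hypothetical tracing helper built only on the accessors above. */
static void my_trace_bio(struct bio *bio)
{
        pr_debug("%s bio: sector %llu, %u sectors, ends at sector %llu\n",
                 bio_data_dir(bio) == WRITE ? "write" : "read",
                 (unsigned long long)bio->bi_iter.bi_sector,
                 bio_sectors(bio),
                 (unsigned long long)bio_end_sector(bio));
}
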
/* Check whether this bio carries any data; a NULL bio is allowed */
static inline bool bio_has_data(struct bio *bio)
{
        if (bio &&
            bio->bi_iter.bi_size &&
            bio_op(bio) != REQ_OP_DISCARD &&
            bio_op(bio) != REQ_OP_SECURE_ERASE &&
            bio_op(bio) != REQ_OP_WRITE_ZEROES)
                return true;

        return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
        return bio_op(bio) == REQ_OP_DISCARD ||
               bio_op(bio) == REQ_OP_SECURE_ERASE ||
               bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline void *bio_data(struct bio *bio)
{
        if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);

        return NULL;
}

static inline bool bio_next_segment(const struct bio *bio,
                                    struct bvec_iter_all *iter)
{
        if (iter->idx >= bio->bi_vcnt)
                return false;

        bvec_advance(&bio->bi_io_vec[iter->idx], iter);
        return true;
}

/*
 * Iterate over each single-page segment. Drivers should never use this
 * version: the bio may have been split before it reached the driver, so
 * the driver won't own all of it.
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
        for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

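A minimal usage sketch, assuming a read-completion path that fully owns the bio (the function name is illustrative, not from the header):

#include <linux/bio.h>
#include <linux/page-flags.h>

/* Hypothetical end_io helper: mark every page of a finished read uptodate. */
static void my_read_end_io(struct bio *bio)
{
        struct bvec_iter_all iter_all;
        struct bio_vec *bvec;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                if (!bio->bi_status)
                        SetPageUptodate(bvec->bv_page);
        }
        bio_put(bio);
}
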
static inline void bio_advance_iter(const struct bio *bio,
                                    struct bvec_iter *iter, unsigned int bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

/* @bytes must not cross the current bvec boundary */
static inline void bio_advance_iter_single(const struct bio *bio,
                                           struct bvec_iter *iter,
                                           unsigned int bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

void __bio_advance(struct bio *, unsigned bytes);

/*
 * bio_advance - increment/complete a bio by some number of bytes.
 * Afterwards @bio represents the remaining, uncompleted portion of the I/O.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
        if (nbytes == bio->bi_iter.bi_size) {
                bio->bi_iter.bi_size = 0;
                return;
        }
        __bio_advance(bio, nbytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)                   \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
             bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)                            \
        __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)                      \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
             bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* Iterate over multi-page bvecs */
#define bio_for_each_bvec(bvl, bio, iter)                               \
        __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

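As a hedged illustration (the helper name and the XOR checksum are mine), this is how single-page segments are walked; the macro iterates a private copy of the iterator, so bio->bi_iter itself is untouched:

#include <linux/bio.h>
#include <linux/highmem.h>

/* Hypothetical helper: XOR-fold every data byte in @bio. */
static u8 my_bio_xor_sum(struct bio *bio)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        u8 sum = 0;
        size_t i;

        bio_for_each_segment(bv, bio, iter) {
                u8 *p = bvec_kmap_local(&bv);

                for (i = 0; i < bv.bv_len; i++)
                        sum ^= p[i];
                kunmap_local(p);
        }
        return sum;
}
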
/* Iterate over all multi-page bvecs; same ownership caveats as above */
#define bio_for_each_bvec_all(bvl, bio, i)              \
        for (i = 0, bvl = bio_first_bvec_all(bio);      \
             i < (bio)->bi_vcnt; i++, bvl++)

static inline unsigned bio_segments(struct bio *bio)
{
        unsigned segs = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;       /* these ops interpret bi_size differently */
        default:
                break;
        }

        bio_for_each_segment(bv, bio, iter)
                segs++;

        return segs;
}

/* Take a reference so the bio won't disappear under the caller */
static inline void bio_get(struct bio *bio)
{
        bio->bi_flags |= (1 << BIO_REFFED);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_cnt);
}

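The header's own comment sketches the intended use: take a reference before submitting if you still need the bio afterwards. Roughly (the surrounding function is hypothetical):

#include <linux/bio.h>
#include <linux/printk.h>

/* Hold a reference across submission so the bio's fields stay readable. */
static void my_submit_and_inspect(struct bio *bio)
{
        bio_get(bio);
        submit_bio(bio);
        if (bio_flagged(bio, BIO_QUIET))
                pr_debug("bio flagged quiet\n");
        bio_put(bio);
}
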
static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
        if (count != 1) {
                bio->bi_flags |= (1 << BIO_REFFED);
                smp_mb();
        }
        atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
        return bio->bi_flags & (1U << bit);
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags &= ~(1U << bit);
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
        return bio_first_bvec_all(bio)->bv_page;
}

static inline struct folio *bio_first_folio_all(struct bio *bio)
{
        return page_folio(bio_first_page_all(bio));
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
                                   int i)
{
        struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

        if (unlikely(i >= bio->bi_vcnt)) {
                fi->folio = NULL;
                return;
        }
        /* ... otherwise fill in fi->folio, fi->offset and fi->length ... */
}

static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
        /* ... advance within the current bvec if possible, else: ... */
        bio_first_folio(fi, bio, fi->_i + 1);
}

/* Iterate over each folio in a bio */
#define bio_for_each_folio_all(fi, bio) \
        for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))

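A sketch of how a completion handler might walk folios (the function is hypothetical; assumes the bio is fully owned and its folios are locked):

#include <linux/bio.h>
#include <linux/pagemap.h>

/* Hypothetical: unlock each folio of a finished read, marking it uptodate. */
static void my_folio_read_done(struct bio *bio)
{
        struct folio_iter fi;

        bio_for_each_folio_all(fi, bio) {
                if (!bio->bi_status)
                        folio_mark_uptodate(fi.folio);
                folio_unlock(fi.folio);
        }
        bio_put(bio);
}
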
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);
int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
                    unsigned *segs, unsigned max_bytes);

/* Get the next @sectors from a bio, splitting only if necessary */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
                                         gfp_t gfp, struct bio_set *bs)
{
        if (sectors >= bio_sectors(bio))
                return bio;

        return bio_split(bio, sectors, gfp, bs);
}

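A hedged sketch of the split-and-chain pattern these helpers enable (the names are mine; @bs is a driver-owned bio_set and @max_sectors is assumed positive):

#include <linux/bio.h>

/* Hypothetical: submit @bio in chunks of at most @max_sectors. */
static void my_submit_in_chunks(struct bio *bio, int max_sectors,
                                struct bio_set *bs)
{
        struct bio *split;

        while ((split = bio_next_split(bio, max_sectors, GFP_NOIO, bs)) != bio) {
                bio_chain(split, bio);  /* parent completes after all chunks */
                submit_bio(split);
        }
        submit_bio(bio);        /* the final remainder */
}
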
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
                             blk_opf_t opf, gfp_t gfp_mask,
                             struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);

struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
                gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
                struct bio *bio_src, gfp_t gfp);

/* Allocate a bio backed by the global fs_bio_set */
static inline struct bio *bio_alloc(struct block_device *bdev,
                unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
        return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}

void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
        bio->bi_status = BLK_STS_IOERR;
        bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
        bio_set_flag(bio, BIO_QUIET);
        bio->bi_status = BLK_STS_AGAIN;
        bio_endio(bio);
}

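For example (the queueing hook is hypothetical), a driver that cannot serve a REQ_NOWAIT bio without blocking would fail it like this:

#include <linux/bio.h>

/* Hypothetical: fail nowait bios instead of sleeping on resources. */
static void my_queue_bio(struct bio *bio, bool would_block)
{
        if (would_block && (bio->bi_opf & REQ_NOWAIT)) {
                bio_wouldblock_error(bio);
                return;
        }
        submit_bio(bio);
}
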
extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
              unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);

int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
                              unsigned off);
bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
                                size_t len, size_t off);
void __bio_add_page(struct bio *bio, struct page *page,
                    unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
                          size_t off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

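Putting allocation, page attachment and synchronous submission together, a minimal sketch (names are mine; error handling trimmed):

#include <linux/bio.h>

/* Hypothetical: synchronously read one page at @sector from @bdev. */
static int my_read_page_sync(struct block_device *bdev, sector_t sector,
                             struct page *page)
{
        struct bio *bio;
        int ret;

        /* A GFP_KERNEL allocation from fs_bio_set does not fail here. */
        bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
        bio->bi_iter.bi_sector = sector;
        __bio_add_page(bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(bio);
        bio_put(bio);
        return ret;
}
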
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
                               struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
        zero_fill_bio_iter(bio, bio->bi_iter);
}

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
        if (bio_flagged(bio, BIO_PAGE_PINNED))
                __bio_release_pages(bio, mark_dirty);
}

#define bio_dev(bio) \
        disk_devt((bio)->bi_bdev->bd_disk)

#if defined(CONFIG_BLK_CGROUP)
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
                                 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
void blkcg_punt_bio_submit(struct bio *bio);
#else   /* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
                                               struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
                                              struct bio *src) { }
static inline void blkcg_punt_bio_submit(struct bio *bio)
{
        submit_bio(bio);
}
#endif  /* CONFIG_BLK_CGROUP */

static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
        bio_clear_flag(bio, BIO_REMAPPED);
        if (bio->bi_bdev != bdev)
                bio_clear_flag(bio, BIO_BPS_THROTTLED);
        bio->bi_bdev = bdev;
        bio_associate_blkg(bio);
}

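A hedged sketch of how a stacking driver (dm/md style) typically uses bio_set_dev when redirecting I/O (the structure and names are hypothetical):

#include <linux/bio.h>

struct my_stacked_dev {                 /* hypothetical driver state */
        struct block_device *lower_bdev;
        sector_t start;                 /* offset into the lower device */
};

/* Redirect @bio to the underlying device, shifting its sector. */
static void my_remap_bio(struct my_stacked_dev *dev, struct bio *bio)
{
        bio_set_dev(bio, dev->lower_bdev);
        bio->bi_iter.bi_sector += dev->start;
        submit_bio(bio);
}
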
/*
 * bio list management for use by remapping drivers (e.g. DM or MD) and loop;
 * bios are chained through bi_next.
 */
struct bio_list {
        struct bio *head;
        struct bio *tail;
};

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
        unsigned sz = 0;
        struct bio *bio;

        bio_list_for_each(bio, bl)
                sz++;

        return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;

        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;

        bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = bl->head;
        bl->head = bio;

        if (!bl->tail)
                bl->tail = bio;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bl->head->bi_next;
                if (!bl->head)
                        bl->tail = NULL;

                bio->bi_next = NULL;
        }

        return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        bl->head = bl->tail = NULL;

        return bio;
}

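These list helpers are commonly used to defer bios to a worker. A minimal sketch (the lock, list and function names are assumptions, not from the header):

#include <linux/bio.h>
#include <linux/spinlock.h>

static struct bio_list my_deferred = BIO_EMPTY_LIST;    /* hypothetical */
static DEFINE_SPINLOCK(my_lock);

static void my_defer_bio(struct bio *bio)
{
        spin_lock_irq(&my_lock);
        bio_list_add(&my_deferred, bio);
        spin_unlock_irq(&my_lock);
}

/* Worker: detach the whole list under the lock, submit outside it. */
static void my_drain(void)
{
        struct bio *bio;

        spin_lock_irq(&my_lock);
        bio = bio_list_get(&my_deferred);       /* empties the list */
        spin_unlock_irq(&my_lock);

        while (bio) {
                struct bio *next = bio->bi_next;

                bio->bi_next = NULL;
                submit_bio(bio);
                bio = next;
        }
}
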
/*
 * Increment the chain count. The flag update must be visible before the
 * raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
        bio_set_flag(bio, BIO_CHAIN);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_remaining);
}

static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
        bio->bi_opf |= REQ_POLLED;
        if (kiocb->ki_flags & IOCB_NOWAIT)
                bio->bi_opf |= REQ_NOWAIT;
}

static inline void bio_clear_polled(struct bio *bio)
{
        bio->bi_opf &= ~REQ_POLLED;
}

static inline bool bio_is_zone_append(struct bio *bio)
{
        if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
                return false;
        return bio_op(bio) == REQ_OP_ZONE_APPEND ||
               bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
                unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
                sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);