1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
4  */
5 #include <linux/mm.h>
6 #include <linux/swap.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/uio.h>
10 #include <linux/iocontext.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/mempool.h>
16 #include <linux/workqueue.h>
17 #include <linux/cgroup.h>
18 #include <linux/highmem.h>
19 #include <linux/sched/sysctl.h>
20 #include <linux/blk-crypto.h>
21 #include <linux/xarray.h>
22 
23 #include <trace/events/block.h>
24 #include "blk.h"
25 #include "blk-rq-qos.h"
26 #include "blk-cgroup.h"
27 
28 #define ALLOC_CACHE_THRESHOLD	16
29 #define ALLOC_CACHE_MAX		256
30 
31 struct bio_alloc_cache {
32 	struct bio		*free_list;
33 	struct bio		*free_list_irq;
34 	unsigned int		nr;
35 	unsigned int		nr_irq;
36 };
37 
38 static struct biovec_slab {
39 	int nr_vecs;
40 	char *name;
41 	struct kmem_cache *slab;
42 } bvec_slabs[] __read_mostly = {
43 	{ .nr_vecs = 16, .name = "biovec-16" },
44 	{ .nr_vecs = 64, .name = "biovec-64" },
45 	{ .nr_vecs = 128, .name = "biovec-128" },
46 	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
47 };
48 
49 static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
50 {
51 	switch (nr_vecs) {
52 	/* smaller bios use inline vecs */
53 	case 5 ... 16:
54 		return &bvec_slabs[0];
55 	case 17 ... 64:
56 		return &bvec_slabs[1];
57 	case 65 ... 128:
58 		return &bvec_slabs[2];
59 	case 129 ... BIO_MAX_VECS:
60 		return &bvec_slabs[3];
61 	default:
62 		BUG();
63 		return NULL;
64 	}
65 }
66 
67 /*
68  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
69  * IO code that does not need private memory pools.
70  */
71 struct bio_set fs_bio_set;
72 EXPORT_SYMBOL(fs_bio_set);
73 
74 /*
75  * Our slab pool management
76  */
77 struct bio_slab {
78 	struct kmem_cache *slab;
79 	unsigned int slab_ref;
80 	unsigned int slab_size;
81 	char name[8];
82 };
83 static DEFINE_MUTEX(bio_slab_lock);
84 static DEFINE_XARRAY(bio_slabs);
85 
86 static struct bio_slab *create_bio_slab(unsigned int size)
87 {
88 	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
89 
90 	if (!bslab)
91 		return NULL;
92 
93 	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
94 	bslab->slab = kmem_cache_create(bslab->name, size,
95 			ARCH_KMALLOC_MINALIGN,
96 			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
97 	if (!bslab->slab)
98 		goto fail_alloc_slab;
99 
100 	bslab->slab_ref = 1;
101 	bslab->slab_size = size;
102 
103 	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
104 		return bslab;
105 
106 	kmem_cache_destroy(bslab->slab);
107 
108 fail_alloc_slab:
109 	kfree(bslab);
110 	return NULL;
111 }
112 
113 static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
114 {
115 	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
116 }
117 
118 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
119 {
120 	unsigned int size = bs_bio_slab_size(bs);
121 	struct bio_slab *bslab;
122 
123 	mutex_lock(&bio_slab_lock);
124 	bslab = xa_load(&bio_slabs, size);
125 	if (bslab)
126 		bslab->slab_ref++;
127 	else
128 		bslab = create_bio_slab(size);
129 	mutex_unlock(&bio_slab_lock);
130 
131 	if (bslab)
132 		return bslab->slab;
133 	return NULL;
134 }
135 
136 static void bio_put_slab(struct bio_set *bs)
137 {
138 	struct bio_slab *bslab = NULL;
139 	unsigned int slab_size = bs_bio_slab_size(bs);
140 
141 	mutex_lock(&bio_slab_lock);
142 
143 	bslab = xa_load(&bio_slabs, slab_size);
144 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
145 		goto out;
146 
147 	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
148 
149 	WARN_ON(!bslab->slab_ref);
150 
151 	if (--bslab->slab_ref)
152 		goto out;
153 
154 	xa_erase(&bio_slabs, slab_size);
155 
156 	kmem_cache_destroy(bslab->slab);
157 	kfree(bslab);
158 
159 out:
160 	mutex_unlock(&bio_slab_lock);
161 }
162 
163 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
164 {
165 	BUG_ON(nr_vecs > BIO_MAX_VECS);
166 
167 	if (nr_vecs == BIO_MAX_VECS)
168 		mempool_free(bv, pool);
169 	else if (nr_vecs > BIO_INLINE_VECS)
170 		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
171 }
172 
173 /*
174  * Make the first allocation restricted and don't dump info on allocation
175  * failures, since we'll fall back to the mempool in case of failure.
176  */
177 static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
178 {
179 	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
180 		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
181 }
182 
183 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
184 		gfp_t gfp_mask)
185 {
186 	struct biovec_slab *bvs = biovec_slab(*nr_vecs);
187 
188 	if (WARN_ON_ONCE(!bvs))
189 		return NULL;
190 
191 	/*
192 	 * Upgrade the nr_vecs request to take full advantage of the allocation.
193 	 * We also rely on this in the bvec_free path.
194 	 */
195 	*nr_vecs = bvs->nr_vecs;
196 
197 	/*
198 	 * Try a slab allocation first for all smaller allocations.  If that
199 	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
200 	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
201 	 */
202 	if (*nr_vecs < BIO_MAX_VECS) {
203 		struct bio_vec *bvl;
204 
205 		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
206 		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
207 			return bvl;
208 		*nr_vecs = BIO_MAX_VECS;
209 	}
210 
211 	return mempool_alloc(pool, gfp_mask);
212 }
213 
214 void bio_uninit(struct bio *bio)
215 {
216 #ifdef CONFIG_BLK_CGROUP
217 	if (bio->bi_blkg) {
218 		blkg_put(bio->bi_blkg);
219 		bio->bi_blkg = NULL;
220 	}
221 #endif
222 	if (bio_integrity(bio))
223 		bio_integrity_free(bio);
224 
225 	bio_crypt_free_ctx(bio);
226 }
227 EXPORT_SYMBOL(bio_uninit);
228 
229 static void bio_free(struct bio *bio)
230 {
231 	struct bio_set *bs = bio->bi_pool;
232 	void *p = bio;
233 
234 	WARN_ON_ONCE(!bs);
235 
236 	bio_uninit(bio);
237 	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
238 	mempool_free(p - bs->front_pad, &bs->bio_pool);
239 }
240 
241 /*
242  * Users of this function have their own bio allocation. Subsequently,
243  * they must remember to pair any call to bio_init() with bio_uninit()
244  * when IO has completed, or when the bio is released.
245  */
246 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
247 	      unsigned short max_vecs, blk_opf_t opf)
248 {
249 	bio->bi_next = NULL;
250 	bio->bi_bdev = bdev;
251 	bio->bi_opf = opf;
252 	bio->bi_flags = 0;
253 	bio->bi_ioprio = 0;
254 	bio->bi_write_hint = 0;
255 	bio->bi_status = 0;
256 	bio->bi_iter.bi_sector = 0;
257 	bio->bi_iter.bi_size = 0;
258 	bio->bi_iter.bi_idx = 0;
259 	bio->bi_iter.bi_bvec_done = 0;
260 	bio->bi_end_io = NULL;
261 	bio->bi_private = NULL;
262 #ifdef CONFIG_BLK_CGROUP
263 	bio->bi_blkg = NULL;
264 	bio->bi_issue.value = 0;
265 	if (bdev)
266 		bio_associate_blkg(bio);
267 #ifdef CONFIG_BLK_CGROUP_IOCOST
268 	bio->bi_iocost_cost = 0;
269 #endif
270 #endif
271 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
272 	bio->bi_crypt_context = NULL;
273 #if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
274 	bio->bi_skip_dm_default_key = false;
275 #endif
276 #endif
277 #ifdef CONFIG_BLK_DEV_INTEGRITY
278 	bio->bi_integrity = NULL;
279 #endif
280 	bio->bi_vcnt = 0;
281 
282 	atomic_set(&bio->__bi_remaining, 1);
283 	atomic_set(&bio->__bi_cnt, 1);
284 	bio->bi_cookie = BLK_QC_T_NONE;
285 
286 	bio->bi_max_vecs = max_vecs;
287 	bio->bi_io_vec = table;
288 	bio->bi_pool = NULL;
289 }
290 EXPORT_SYMBOL(bio_init);
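
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the bio_init()/bio_uninit() pairing described above, using an on-stack bio
 * and a caller-provided bio_vec.  The helper name sync_write_one_page() and
 * its arguments are hypothetical.
 */
static int sync_write_one_page(struct block_device *bdev, struct page *page,
			       sector_t sector)
{
	struct bio_vec bvec;
	struct bio bio;
	int ret;

	bio_init(&bio, bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(&bio);	/* synchronous submission */
	bio_uninit(&bio);		/* must pair with bio_init() */
	return ret;
}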
291 
292 /**
293  * bio_reset - reinitialize a bio
294  * @bio:	bio to reset
295  * @bdev:	block device to use the bio for
296  * @opf:	operation and flags for bio
297  *
298  * Description:
299  *   After calling bio_reset(), @bio will be in the same state as a freshly
300  *   allocated bio returned by bio_alloc_bioset() - the only fields that are
301  *   preserved are the ones that are initialized by bio_alloc_bioset(). See
302  *   comment in struct bio.
303  */
304 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
305 {
306 	bio_uninit(bio);
307 	memset(bio, 0, BIO_RESET_BYTES);
308 	atomic_set(&bio->__bi_remaining, 1);
309 	bio->bi_bdev = bdev;
310 	if (bio->bi_bdev)
311 		bio_associate_blkg(bio);
312 	bio->bi_opf = opf;
313 }
314 EXPORT_SYMBOL(bio_reset);
315 
316 static struct bio *__bio_chain_endio(struct bio *bio)
317 {
318 	struct bio *parent = bio->bi_private;
319 
320 	if (bio->bi_status && !parent->bi_status)
321 		parent->bi_status = bio->bi_status;
322 	bio_put(bio);
323 	return parent;
324 }
325 
326 static void bio_chain_endio(struct bio *bio)
327 {
328 	bio_endio(__bio_chain_endio(bio));
329 }
330 
331 /**
332  * bio_chain - chain bio completions
333  * @bio: the target bio
334  * @parent: the parent bio of @bio
335  *
336  * The caller won't have a bi_end_io called when @bio completes - instead,
337  * @parent's bi_end_io won't be called until both @parent and @bio have
338  * completed; the chained bio will also be freed when it completes.
339  *
340  * The caller must not set bi_private or bi_end_io in @bio.
341  */
342 void bio_chain(struct bio *bio, struct bio *parent)
343 {
344 	BUG_ON(bio->bi_private || bio->bi_end_io);
345 
346 	bio->bi_private = parent;
347 	bio->bi_end_io	= bio_chain_endio;
348 	bio_inc_remaining(parent);
349 }
350 EXPORT_SYMBOL(bio_chain);
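
/*
 * Illustrative sketch (editorial addition): the typical bio_chain() pattern.
 * A driver that must issue @child before @parent completes chains it so that
 * @parent's bi_end_io only runs once both bios have finished.  The helper
 * name is hypothetical.
 */
static void issue_chained_child(struct bio *parent, struct bio *child)
{
	/* @child must have bi_private and bi_end_io unset, per the rule above */
	bio_chain(child, parent);
	submit_bio(child);
	/* the caller then submits or completes @parent as usual */
}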
351 
352 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
353 		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
354 {
355 	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
356 
357 	if (bio) {
358 		bio_chain(bio, new);
359 		submit_bio(bio);
360 	}
361 
362 	return new;
363 }
364 EXPORT_SYMBOL_GPL(blk_next_bio);
365 
366 static void bio_alloc_rescue(struct work_struct *work)
367 {
368 	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
369 	struct bio *bio;
370 
371 	while (1) {
372 		spin_lock(&bs->rescue_lock);
373 		bio = bio_list_pop(&bs->rescue_list);
374 		spin_unlock(&bs->rescue_lock);
375 
376 		if (!bio)
377 			break;
378 
379 		submit_bio_noacct(bio);
380 	}
381 }
382 
383 static void punt_bios_to_rescuer(struct bio_set *bs)
384 {
385 	struct bio_list punt, nopunt;
386 	struct bio *bio;
387 
388 	if (WARN_ON_ONCE(!bs->rescue_workqueue))
389 		return;
390 	/*
391 	 * In order to guarantee forward progress we must punt only bios that
392 	 * were allocated from this bio_set; otherwise, if there was a bio on
393 	 * there for a stacking driver higher up in the stack, processing it
394 	 * could require allocating bios from this bio_set, and doing that from
395 	 * our own rescuer would be bad.
396 	 *
397 	 * Since bio lists are singly linked, pop them all instead of trying to
398 	 * remove from the middle of the list:
399 	 */
400 
401 	bio_list_init(&punt);
402 	bio_list_init(&nopunt);
403 
404 	while ((bio = bio_list_pop(&current->bio_list[0])))
405 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
406 	current->bio_list[0] = nopunt;
407 
408 	bio_list_init(&nopunt);
409 	while ((bio = bio_list_pop(&current->bio_list[1])))
410 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
411 	current->bio_list[1] = nopunt;
412 
413 	spin_lock(&bs->rescue_lock);
414 	bio_list_merge(&bs->rescue_list, &punt);
415 	spin_unlock(&bs->rescue_lock);
416 
417 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
418 }
419 
420 static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
421 {
422 	unsigned long flags;
423 
424 	/* cache->free_list must be empty */
425 	if (WARN_ON_ONCE(cache->free_list))
426 		return;
427 
428 	local_irq_save(flags);
429 	cache->free_list = cache->free_list_irq;
430 	cache->free_list_irq = NULL;
431 	cache->nr += cache->nr_irq;
432 	cache->nr_irq = 0;
433 	local_irq_restore(flags);
434 }
435 
436 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
437 		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
438 		struct bio_set *bs)
439 {
440 	struct bio_alloc_cache *cache;
441 	struct bio *bio;
442 
443 	cache = per_cpu_ptr(bs->cache, get_cpu());
444 	if (!cache->free_list) {
445 		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
446 			bio_alloc_irq_cache_splice(cache);
447 		if (!cache->free_list) {
448 			put_cpu();
449 			return NULL;
450 		}
451 	}
452 	bio = cache->free_list;
453 	cache->free_list = bio->bi_next;
454 	cache->nr--;
455 	put_cpu();
456 
457 	bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
458 	bio->bi_pool = bs;
459 	return bio;
460 }
461 
462 /**
463  * bio_alloc_bioset - allocate a bio for I/O
464  * @bdev:	block device to allocate the bio for (can be %NULL)
465  * @nr_vecs:	number of bvecs to pre-allocate
466  * @opf:	operation and flags for bio
467  * @gfp_mask:   the GFP_* mask given to the slab allocator
468  * @bs:		the bio_set to allocate from.
469  *
470  * Allocate a bio from the mempools in @bs.
471  *
472  * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
473  * allocate a bio.  This is due to the mempool guarantees.  To make this work,
474  * callers must never allocate more than 1 bio at a time from the general pool.
475  * Callers that need to allocate more than 1 bio must always submit the
476  * previously allocated bio for IO before attempting to allocate a new one.
477  * Failure to do so can cause deadlocks under memory pressure.
478  *
479  * Note that when running under submit_bio_noacct() (i.e. any block driver),
480  * bios are not submitted until after you return - see the code in
481  * submit_bio_noacct() that converts recursion into iteration, to prevent
482  * stack overflows.
483  *
484  * This would normally mean allocating multiple bios under submit_bio_noacct()
485  * would be susceptible to deadlocks, but we have
486  * deadlock avoidance code that resubmits any blocked bios from a rescuer
487  * thread.
488  *
489  * However, we do not guarantee forward progress for allocations from other
490  * mempools. Doing multiple allocations from the same mempool under
491  * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
492  * for per bio allocations.
493  *
494  * Returns: Pointer to new bio on success, NULL on failure.
495  */
496 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
497 			     blk_opf_t opf, gfp_t gfp_mask,
498 			     struct bio_set *bs)
499 {
500 	gfp_t saved_gfp = gfp_mask;
501 	struct bio *bio;
502 	void *p;
503 
504 	/* should not use nobvec bioset for nr_vecs > 0 */
505 	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
506 		return NULL;
507 
508 	if (opf & REQ_ALLOC_CACHE) {
509 		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
510 			bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
511 						     gfp_mask, bs);
512 			if (bio)
513 				return bio;
514 			/*
515 			 * No cached bio available, bio returned below marked with
516 			 * REQ_ALLOC_CACHE to participate in per-cpu alloc cache.
517 			 */
518 		} else {
519 			opf &= ~REQ_ALLOC_CACHE;
520 		}
521 	}
522 
523 	/*
524 	 * submit_bio_noacct() converts recursion to iteration; this means if
525 	 * we're running beneath it, any bios we allocate and submit will not be
526 	 * submitted (and thus freed) until after we return.
527 	 *
528 	 * This exposes us to a potential deadlock if we allocate multiple bios
529 	 * from the same bio_set() while running underneath submit_bio_noacct().
530 	 * If we were to allocate multiple bios (say a stacking block driver
531 	 * that was splitting bios), we would deadlock if we exhausted the
532 	 * mempool's reserve.
533 	 *
534 	 * We solve this, and guarantee forward progress, with a rescuer
535 	 * workqueue per bio_set. If we go to allocate and there are bios on
536 	 * current->bio_list, we first try the allocation without
537 	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
538 	 * blocking to the rescuer workqueue before we retry with the original
539 	 * gfp_flags.
540 	 */
541 	if (current->bio_list &&
542 	    (!bio_list_empty(&current->bio_list[0]) ||
543 	     !bio_list_empty(&current->bio_list[1])) &&
544 	    bs->rescue_workqueue)
545 		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
546 
547 	p = mempool_alloc(&bs->bio_pool, gfp_mask);
548 	if (!p && gfp_mask != saved_gfp) {
549 		punt_bios_to_rescuer(bs);
550 		gfp_mask = saved_gfp;
551 		p = mempool_alloc(&bs->bio_pool, gfp_mask);
552 	}
553 	if (unlikely(!p))
554 		return NULL;
555 	if (!mempool_is_saturated(&bs->bio_pool))
556 		opf &= ~REQ_ALLOC_CACHE;
557 
558 	bio = p + bs->front_pad;
559 	if (nr_vecs > BIO_INLINE_VECS) {
560 		struct bio_vec *bvl = NULL;
561 
562 		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
563 		if (!bvl && gfp_mask != saved_gfp) {
564 			punt_bios_to_rescuer(bs);
565 			gfp_mask = saved_gfp;
566 			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
567 		}
568 		if (unlikely(!bvl))
569 			goto err_free;
570 
571 		bio_init(bio, bdev, bvl, nr_vecs, opf);
572 	} else if (nr_vecs) {
573 		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
574 	} else {
575 		bio_init(bio, bdev, NULL, 0, opf);
576 	}
577 
578 	bio->bi_pool = bs;
579 	return bio;
580 
581 err_free:
582 	mempool_free(p, &bs->bio_pool);
583 	return NULL;
584 }
585 EXPORT_SYMBOL(bio_alloc_bioset);
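
/*
 * Illustrative sketch (editorial addition): allocating from a private bio_set
 * as described above.  With a reclaiming gfp mask such as GFP_NOIO the
 * allocation is mempool-backed and does not fail, so the usual pattern is one
 * bio at a time, submitted before the next allocation.  my_bio_set and
 * my_end_io are hypothetical.
 */
static void my_submit_read(struct bio_set *my_bio_set,
			   struct block_device *bdev, struct page *page,
			   sector_t sector, bio_end_io_t *my_end_io,
			   void *private)
{
	struct bio *bio;

	/* GFP_NOIO includes __GFP_DIRECT_RECLAIM, so this cannot return NULL */
	bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOIO, my_bio_set);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = my_end_io;
	bio->bi_private = private;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);
}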
586 
587 /**
588  * bio_kmalloc - kmalloc a bio
589  * @nr_vecs:	number of bio_vecs to allocate
590  * @gfp_mask:   the GFP_* mask given to the slab allocator
591  *
592  * Use kmalloc to allocate a bio (including bvecs).  The bio must be initialized
593  * using bio_init() before use.  To free a bio returned from this function use
594  * kfree() after calling bio_uninit().  A bio returned from this function can
595  * be reused by calling bio_uninit() before calling bio_init() again.
596  *
597  * Note that unlike bio_alloc() or bio_alloc_bioset() allocations from this
598  * function are not backed by a mempool and can fail.  Do not use this function
599  * for allocations in the file system I/O path.
600  *
601  * Returns: Pointer to new bio on success, NULL on failure.
602  */
603 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
604 {
605 	struct bio *bio;
606 
607 	if (nr_vecs > UIO_MAXIOV)
608 		return NULL;
609 	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
610 }
611 EXPORT_SYMBOL(bio_kmalloc);
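
/*
 * Illustrative sketch (editorial addition): the bio_kmalloc() lifecycle
 * described above - kmalloc, bio_init(), use, bio_uninit(), kfree().  The
 * helper names are hypothetical; this interface is for callers outside the
 * fs I/O path.
 */
static struct bio *my_alloc_kmalloc_bio(struct block_device *bdev,
					unsigned short nr_vecs)
{
	struct bio *bio;

	bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
	if (!bio)
		return NULL;
	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
	return bio;
}

static void my_free_kmalloc_bio(struct bio *bio)
{
	bio_uninit(bio);
	kfree(bio);
}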
612 
613 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
614 {
615 	struct bio_vec bv;
616 	struct bvec_iter iter;
617 
618 	__bio_for_each_segment(bv, bio, iter, start)
619 		memzero_bvec(&bv);
620 }
621 EXPORT_SYMBOL(zero_fill_bio_iter);
622 
623 /**
624  * bio_truncate - truncate the bio to the smaller size @new_size
625  * @bio:	the bio to be truncated
626  * @new_size:	new size for truncating the bio
627  *
628  * Description:
629  *   Truncate the bio to the new size @new_size. If bio_op(bio) is
630  *   REQ_OP_READ, zero the truncated part. This function should only
631  *   be used for handling corner cases, such as bio eod.
632  */
633 static void bio_truncate(struct bio *bio, unsigned new_size)
634 {
635 	struct bio_vec bv;
636 	struct bvec_iter iter;
637 	unsigned int done = 0;
638 	bool truncated = false;
639 
640 	if (new_size >= bio->bi_iter.bi_size)
641 		return;
642 
643 	if (bio_op(bio) != REQ_OP_READ)
644 		goto exit;
645 
646 	bio_for_each_segment(bv, bio, iter) {
647 		if (done + bv.bv_len > new_size) {
648 			unsigned offset;
649 
650 			if (!truncated)
651 				offset = new_size - done;
652 			else
653 				offset = 0;
654 			zero_user(bv.bv_page, bv.bv_offset + offset,
655 				  bv.bv_len - offset);
656 			truncated = true;
657 		}
658 		done += bv.bv_len;
659 	}
660 
661  exit:
662 	/*
663 	 * Don't touch bvec table here and make it really immutable, since
664 	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
665 	 * in its ->bi_end_io() callback.
666 	 *
667 	 * It is enough to truncate bio by updating .bi_size since we can make
668 	 * correct bvec with the updated .bi_size for drivers.
669 	 */
670 	bio->bi_iter.bi_size = new_size;
671 }
672 
673 /**
674  * guard_bio_eod - truncate a BIO to fit the block device
675  * @bio:	bio to truncate
676  *
677  * This allows us to do IO even on the odd last sectors of a device, even if the
678  * block size is some multiple of the physical sector size.
679  *
680  * We'll just truncate the bio to the size of the device, and clear the end of
681  * the buffer head manually.  Truly out-of-range accesses will turn into actual
682  * I/O errors, this only handles the "we need to be able to do I/O at the final
683  * sector" case.
684  */
685 void guard_bio_eod(struct bio *bio)
686 {
687 	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
688 
689 	if (!maxsector)
690 		return;
691 
692 	/*
693 	 * If the *whole* IO is past the end of the device,
694 	 * let it through, and the IO layer will turn it into
695 	 * an EIO.
696 	 */
697 	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
698 		return;
699 
700 	maxsector -= bio->bi_iter.bi_sector;
701 	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
702 		return;
703 
704 	bio_truncate(bio, maxsector << 9);
705 }
706 
707 static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
708 				   unsigned int nr)
709 {
710 	unsigned int i = 0;
711 	struct bio *bio;
712 
713 	while ((bio = cache->free_list) != NULL) {
714 		cache->free_list = bio->bi_next;
715 		cache->nr--;
716 		bio_free(bio);
717 		if (++i == nr)
718 			break;
719 	}
720 	return i;
721 }
722 
723 static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
724 				  unsigned int nr)
725 {
726 	nr -= __bio_alloc_cache_prune(cache, nr);
727 	if (!READ_ONCE(cache->free_list)) {
728 		bio_alloc_irq_cache_splice(cache);
729 		__bio_alloc_cache_prune(cache, nr);
730 	}
731 }
732 
733 static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
734 {
735 	struct bio_set *bs;
736 
737 	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
738 	if (bs->cache) {
739 		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
740 
741 		bio_alloc_cache_prune(cache, -1U);
742 	}
743 	return 0;
744 }
745 
746 static void bio_alloc_cache_destroy(struct bio_set *bs)
747 {
748 	int cpu;
749 
750 	if (!bs->cache)
751 		return;
752 
753 	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
754 	for_each_possible_cpu(cpu) {
755 		struct bio_alloc_cache *cache;
756 
757 		cache = per_cpu_ptr(bs->cache, cpu);
758 		bio_alloc_cache_prune(cache, -1U);
759 	}
760 	free_percpu(bs->cache);
761 	bs->cache = NULL;
762 }
763 
764 static inline void bio_put_percpu_cache(struct bio *bio)
765 {
766 	struct bio_alloc_cache *cache;
767 
768 	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
769 	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX) {
770 		put_cpu();
771 		bio_free(bio);
772 		return;
773 	}
774 
775 	bio_uninit(bio);
776 
777 	if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
778 		bio->bi_next = cache->free_list;
779 		bio->bi_bdev = NULL;
780 		cache->free_list = bio;
781 		cache->nr++;
782 	} else {
783 		unsigned long flags;
784 
785 		local_irq_save(flags);
786 		bio->bi_next = cache->free_list_irq;
787 		cache->free_list_irq = bio;
788 		cache->nr_irq++;
789 		local_irq_restore(flags);
790 	}
791 	put_cpu();
792 }
793 
794 /**
795  * bio_put - release a reference to a bio
796  * @bio:   bio to release reference to
797  *
798  * Description:
799  *   Put a reference to a &struct bio, either one you have gotten with
800  *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
801  **/
802 void bio_put(struct bio *bio)
803 {
804 	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
805 		BUG_ON(!atomic_read(&bio->__bi_cnt));
806 		if (!atomic_dec_and_test(&bio->__bi_cnt))
807 			return;
808 	}
809 	if (bio->bi_opf & REQ_ALLOC_CACHE)
810 		bio_put_percpu_cache(bio);
811 	else
812 		bio_free(bio);
813 }
814 EXPORT_SYMBOL(bio_put);
815 
816 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
817 {
818 	bio_set_flag(bio, BIO_CLONED);
819 	bio->bi_ioprio = bio_src->bi_ioprio;
820 	bio->bi_write_hint = bio_src->bi_write_hint;
821 	bio->bi_iter = bio_src->bi_iter;
822 
823 	if (bio->bi_bdev) {
824 		if (bio->bi_bdev == bio_src->bi_bdev &&
825 		    bio_flagged(bio_src, BIO_REMAPPED))
826 			bio_set_flag(bio, BIO_REMAPPED);
827 		bio_clone_blkg_association(bio, bio_src);
828 	}
829 
830 	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
831 		return -ENOMEM;
832 	if (bio_integrity(bio_src) &&
833 	    bio_integrity_clone(bio, bio_src, gfp) < 0)
834 		return -ENOMEM;
835 	return 0;
836 }
837 
838 /**
839  * bio_alloc_clone - clone a bio that shares the original bio's biovec
840  * @bdev: block_device to clone onto
841  * @bio_src: bio to clone from
842  * @gfp: allocation priority
843  * @bs: bio_set to allocate from
844  *
845  * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
846  * bio, but not the actual data it points to.
847  *
848  * The caller must ensure that the returned bio is not freed before @bio_src.
849  */
850 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
851 		gfp_t gfp, struct bio_set *bs)
852 {
853 	struct bio *bio;
854 
855 	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
856 	if (!bio)
857 		return NULL;
858 
859 	if (__bio_clone(bio, bio_src, gfp) < 0) {
860 		bio_put(bio);
861 		return NULL;
862 	}
863 	bio->bi_io_vec = bio_src->bi_io_vec;
864 
865 	return bio;
866 }
867 EXPORT_SYMBOL(bio_alloc_clone);
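
/*
 * Illustrative sketch (editorial addition): how a stacking driver might use
 * bio_alloc_clone() to redirect I/O to a lower device while keeping the
 * original bio alive until the clone completes.  Everything named my_* is
 * hypothetical.
 */
static void my_clone_end_io(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}

static void my_remap_and_submit(struct bio *orig, struct block_device *lower,
				struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_clone(lower, orig, GFP_NOIO, bs);
	if (!clone) {
		bio_io_error(orig);
		return;
	}
	clone->bi_private = orig;
	clone->bi_end_io = my_clone_end_io;
	submit_bio_noacct(clone);
}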
868 
869 /**
870  * bio_init_clone - clone a bio that shares the original bio's biovec
871  * @bdev: block_device to clone onto
872  * @bio: bio to clone into
873  * @bio_src: bio to clone from
874  * @gfp: allocation priority
875  *
876  * Initialize a new bio in caller provided memory that is a clone of @bio_src.
877  * The caller owns the returned bio, but not the actual data it points to.
878  *
879  * The caller must ensure that @bio_src is not freed before @bio.
880  */
881 int bio_init_clone(struct block_device *bdev, struct bio *bio,
882 		struct bio *bio_src, gfp_t gfp)
883 {
884 	int ret;
885 
886 	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
887 	ret = __bio_clone(bio, bio_src, gfp);
888 	if (ret)
889 		bio_uninit(bio);
890 	return ret;
891 }
892 EXPORT_SYMBOL(bio_init_clone);
893 
894 /**
895  * bio_full - check if the bio is full
896  * @bio:	bio to check
897  * @len:	length of one segment to be added
898  *
899  * Return true if @bio is full and one segment with @len bytes can't be
900  * added to the bio, otherwise return false
901  */
902 static inline bool bio_full(struct bio *bio, unsigned len)
903 {
904 	if (bio->bi_vcnt >= bio->bi_max_vecs)
905 		return true;
906 	if (bio->bi_iter.bi_size > UINT_MAX - len)
907 		return true;
908 	return false;
909 }
910 
911 static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
912 		unsigned int len, unsigned int off, bool *same_page)
913 {
914 	size_t bv_end = bv->bv_offset + bv->bv_len;
915 	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
916 	phys_addr_t page_addr = page_to_phys(page);
917 
918 	if (vec_end_addr + 1 != page_addr + off)
919 		return false;
920 	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
921 		return false;
922 	if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
923 		return false;
924 
925 	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
926 	if (!*same_page) {
927 		if (IS_ENABLED(CONFIG_KMSAN))
928 			return false;
929 		if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
930 			return false;
931 	}
932 
933 	bv->bv_len += len;
934 	return true;
935 }
936 
937 /*
938  * Try to merge a page into a segment, while obeying the hardware segment
939  * size limit.  This is not for normal read/write bios, but for passthrough
940  * or Zone Append operations that we can't split.
941  */
942 bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
943 		struct page *page, unsigned len, unsigned offset,
944 		bool *same_page)
945 {
946 	unsigned long mask = queue_segment_boundary(q);
947 	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
948 	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
949 
950 	if ((addr1 | mask) != (addr2 | mask))
951 		return false;
952 	if (len > queue_max_segment_size(q) - bv->bv_len)
953 		return false;
954 	return bvec_try_merge_page(bv, page, len, offset, same_page);
955 }
956 
957 /**
958  * bio_add_hw_page - attempt to add a page to a bio with hw constraints
959  * @q: the target queue
960  * @bio: destination bio
961  * @page: page to add
962  * @len: vec entry length
963  * @offset: vec entry offset
964  * @max_sectors: maximum number of sectors that can be added
965  * @same_page: return if the segment has been merged inside the same page
966  *
967  * Add a page to a bio while respecting the hardware max_sectors, max_segment
968  * and gap limitations.
969  */
970 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
971 		struct page *page, unsigned int len, unsigned int offset,
972 		unsigned int max_sectors, bool *same_page)
973 {
974 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
975 		return 0;
976 
977 	if (((bio->bi_iter.bi_size + len) >> SECTOR_SHIFT) > max_sectors)
978 		return 0;
979 
980 	if (bio->bi_vcnt > 0) {
981 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
982 
983 		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
984 				same_page)) {
985 			bio->bi_iter.bi_size += len;
986 			return len;
987 		}
988 
989 		if (bio->bi_vcnt >=
990 		    min(bio->bi_max_vecs, queue_max_segments(q)))
991 			return 0;
992 
993 		/*
994 		 * If the queue doesn't support SG gaps and adding this segment
995 		 * would create a gap, disallow it.
996 		 */
997 		if (bvec_gap_to_prev(&q->limits, bv, offset))
998 			return 0;
999 	}
1000 
1001 	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset);
1002 	bio->bi_vcnt++;
1003 	bio->bi_iter.bi_size += len;
1004 	return len;
1005 }
1006 
1007 /**
1008  * bio_add_pc_page	- attempt to add page to passthrough bio
1009  * @q: the target queue
1010  * @bio: destination bio
1011  * @page: page to add
1012  * @len: vec entry length
1013  * @offset: vec entry offset
1014  *
1015  * Attempt to add a page to the bio_vec maplist. This can fail for a
1016  * number of reasons, such as the bio being full or target block device
1017  * limitations. The target block device must allow bios up to PAGE_SIZE,
1018  * so it is always possible to add a single page to an empty bio.
1019  *
1020  * This should only be used by passthrough bios.
1021  */
1022 int bio_add_pc_page(struct request_queue *q, struct bio *bio,
1023 		struct page *page, unsigned int len, unsigned int offset)
1024 {
1025 	bool same_page = false;
1026 	return bio_add_hw_page(q, bio, page, len, offset,
1027 			queue_max_hw_sectors(q), &same_page);
1028 }
1029 EXPORT_SYMBOL(bio_add_pc_page);
1030 
1031 /**
1032  * bio_add_zone_append_page - attempt to add page to zone-append bio
1033  * @bio: destination bio
1034  * @page: page to add
1035  * @len: vec entry length
1036  * @offset: vec entry offset
1037  *
1038  * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
1039  * for a zone-append request. This can fail for a number of reasons, such as the
1040  * bio being full, the target block device not being a zoned block device, or
1041  * other limitations of the target block device. The target block device must
1042  * allow bios up to PAGE_SIZE, so it is always possible to add a single page
1043  * to an empty bio.
1044  *
1045  * Returns: number of bytes added to the bio, or 0 in case of a failure.
1046  */
1047 int bio_add_zone_append_page(struct bio *bio, struct page *page,
1048 			     unsigned int len, unsigned int offset)
1049 {
1050 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1051 	bool same_page = false;
1052 
1053 	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
1054 		return 0;
1055 
1056 	if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
1057 		return 0;
1058 
1059 	return bio_add_hw_page(q, bio, page, len, offset,
1060 			       queue_max_zone_append_sectors(q), &same_page);
1061 }
1062 EXPORT_SYMBOL_GPL(bio_add_zone_append_page);
1063 
1064 /**
1065  * __bio_add_page - add page(s) to a bio in a new segment
1066  * @bio: destination bio
1067  * @page: start page to add
1068  * @len: length of the data to add, may cross pages
1069  * @off: offset of the data relative to @page, may cross pages
1070  *
1071  * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
1072  * that @bio has space for another bvec.
1073  */
1074 void __bio_add_page(struct bio *bio, struct page *page,
1075 		unsigned int len, unsigned int off)
1076 {
1077 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
1078 	WARN_ON_ONCE(bio_full(bio, len));
1079 
1080 	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
1081 	bio->bi_iter.bi_size += len;
1082 	bio->bi_vcnt++;
1083 }
1084 EXPORT_SYMBOL_GPL(__bio_add_page);
1085 
1086 /**
1087  *	bio_add_page	-	attempt to add page(s) to bio
1088  *	@bio: destination bio
1089  *	@page: start page to add
1090  *	@len: vec entry length, may cross pages
1091  *	@offset: vec entry offset relative to @page, may cross pages
1092  *
1093  *	Attempt to add page(s) to the bio_vec maplist. This will only fail
1094  *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1095  */
1096 int bio_add_page(struct bio *bio, struct page *page,
1097 		 unsigned int len, unsigned int offset)
1098 {
1099 	bool same_page = false;
1100 
1101 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1102 		return 0;
1103 	if (bio->bi_iter.bi_size > UINT_MAX - len)
1104 		return 0;
1105 
1106 	if (bio->bi_vcnt > 0 &&
1107 	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1108 				page, len, offset, &same_page)) {
1109 		bio->bi_iter.bi_size += len;
1110 		return len;
1111 	}
1112 
1113 	if (bio->bi_vcnt >= bio->bi_max_vecs)
1114 		return 0;
1115 	__bio_add_page(bio, page, len, offset);
1116 	return len;
1117 }
1118 EXPORT_SYMBOL(bio_add_page);
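
/*
 * Illustrative sketch (editorial addition): the usual bio_add_page() loop,
 * checking the return value so that a full bio can be submitted and a new one
 * started.  The helper name and the page source are hypothetical.
 */
static unsigned int my_add_pages(struct bio *bio, struct page **pages,
				 unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		/* returns the number of bytes added, or 0 if the bio is full */
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;
	}
	return i;	/* pages actually added; the caller submits and retries */
}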
1119 
1120 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
1121 			  size_t off)
1122 {
1123 	WARN_ON_ONCE(len > UINT_MAX);
1124 	WARN_ON_ONCE(off > UINT_MAX);
1125 	__bio_add_page(bio, &folio->page, len, off);
1126 }
1127 
1128 /**
1129  * bio_add_folio - Attempt to add part of a folio to a bio.
1130  * @bio: BIO to add to.
1131  * @folio: Folio to add.
1132  * @len: How many bytes from the folio to add.
1133  * @off: First byte in this folio to add.
1134  *
1135  * Filesystems that use folios can call this function instead of calling
1136  * bio_add_page() for each page in the folio.  If @off is bigger than
1137  * PAGE_SIZE, this function can create a bio_vec that starts in a page
1138  * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
1139  *
1140  * Return: Whether the addition was successful.
1141  */
1142 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
1143 		   size_t off)
1144 {
1145 	if (len > UINT_MAX || off > UINT_MAX)
1146 		return false;
1147 	return bio_add_page(bio, &folio->page, len, off) > 0;
1148 }
1149 EXPORT_SYMBOL(bio_add_folio);
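
/*
 * Illustrative sketch (editorial addition): a filesystem that tracks folios
 * can add a whole folio in one call instead of iterating its pages.  The
 * surrounding writeback context is hypothetical.
 */
static bool my_add_folio_for_writeback(struct bio *bio, struct folio *folio)
{
	return bio_add_folio(bio, folio, folio_size(folio), 0);
}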
1150 
1151 void __bio_release_pages(struct bio *bio, bool mark_dirty)
1152 {
1153 	struct folio_iter fi;
1154 
1155 	bio_for_each_folio_all(fi, bio) {
1156 		struct page *page;
1157 		size_t nr_pages;
1158 
1159 		if (mark_dirty) {
1160 			folio_lock(fi.folio);
1161 			folio_mark_dirty(fi.folio);
1162 			folio_unlock(fi.folio);
1163 		}
1164 		page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
1165 		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
1166 			   fi.offset / PAGE_SIZE + 1;
1167 		do {
1168 			bio_release_page(bio, page++);
1169 		} while (--nr_pages != 0);
1170 	}
1171 }
1172 EXPORT_SYMBOL_GPL(__bio_release_pages);
1173 
1174 void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
1175 {
1176 	size_t size = iov_iter_count(iter);
1177 
1178 	WARN_ON_ONCE(bio->bi_max_vecs);
1179 
1180 	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1181 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1182 		size_t max_sectors = queue_max_zone_append_sectors(q);
1183 
1184 		size = min(size, max_sectors << SECTOR_SHIFT);
1185 	}
1186 
1187 	bio->bi_vcnt = iter->nr_segs;
1188 	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1189 	bio->bi_iter.bi_bvec_done = iter->iov_offset;
1190 	bio->bi_iter.bi_size = size;
1191 	bio_set_flag(bio, BIO_CLONED);
1192 }
1193 
1194 static int bio_iov_add_page(struct bio *bio, struct page *page,
1195 		unsigned int len, unsigned int offset)
1196 {
1197 	bool same_page = false;
1198 
1199 	if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len))
1200 		return -EIO;
1201 
1202 	if (bio->bi_vcnt > 0 &&
1203 	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1204 				page, len, offset, &same_page)) {
1205 		bio->bi_iter.bi_size += len;
1206 		if (same_page)
1207 			bio_release_page(bio, page);
1208 		return 0;
1209 	}
1210 	__bio_add_page(bio, page, len, offset);
1211 	return 0;
1212 }
1213 
1214 static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
1215 		unsigned int len, unsigned int offset)
1216 {
1217 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1218 	bool same_page = false;
1219 
1220 	if (bio_add_hw_page(q, bio, page, len, offset,
1221 			queue_max_zone_append_sectors(q), &same_page) != len)
1222 		return -EINVAL;
1223 	if (same_page)
1224 		bio_release_page(bio, page);
1225 	return 0;
1226 }
1227 
1228 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
1229 
1230 /**
1231  * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
1232  * @bio: bio to add pages to
1233  * @iter: iov iterator describing the region to be mapped
1234  *
1235  * Extracts pages from *iter and appends them to @bio's bvec array.  The pages
1236  * will have to be cleaned up in the way indicated by the BIO_PAGE_PINNED flag.
1237  * For a multi-segment *iter, this function only adds pages from the next
1238  * non-empty segment of the iov iterator.
1239  */
1240 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1241 {
1242 	iov_iter_extraction_t extraction_flags = 0;
1243 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1244 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1245 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1246 	struct page **pages = (struct page **)bv;
1247 	ssize_t size, left;
1248 	unsigned len, i = 0;
1249 	size_t offset;
1250 	int ret = 0;
1251 
1252 	/*
1253 	 * Move page array up in the allocated memory for the bio vecs as far as
1254 	 * possible so that we can start filling biovecs from the beginning
1255 	 * without overwriting the temporary page array.
1256 	 */
1257 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1258 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1259 
1260 	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
1261 		extraction_flags |= ITER_ALLOW_P2PDMA;
1262 
1263 	/*
1264 	 * Each segment in the iov is required to be a block size multiple.
1265 	 * However, we may not be able to get the entire segment if it spans
1266 	 * more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the
1267 	 * result to ensure the bio's total size is correct. The remainder of
1268 	 * the iov data will be picked up in the next bio iteration.
1269 	 */
1270 	size = iov_iter_extract_pages(iter, &pages,
1271 				      UINT_MAX - bio->bi_iter.bi_size,
1272 				      nr_pages, extraction_flags, &offset);
1273 	if (unlikely(size <= 0))
1274 		return size ? size : -EFAULT;
1275 
1276 	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
1277 
1278 	if (bio->bi_bdev) {
1279 		size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
1280 		iov_iter_revert(iter, trim);
1281 		size -= trim;
1282 	}
1283 
1284 	if (unlikely(!size)) {
1285 		ret = -EFAULT;
1286 		goto out;
1287 	}
1288 
1289 	for (left = size, i = 0; left > 0; left -= len, i++) {
1290 		struct page *page = pages[i];
1291 
1292 		len = min_t(size_t, PAGE_SIZE - offset, left);
1293 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1294 			ret = bio_iov_add_zone_append_page(bio, page, len,
1295 					offset);
1296 			if (ret)
1297 				break;
1298 		} else
1299 			bio_iov_add_page(bio, page, len, offset);
1300 
1301 		offset = 0;
1302 	}
1303 
1304 	iov_iter_revert(iter, left);
1305 out:
1306 	while (i < nr_pages)
1307 		bio_release_page(bio, pages[i++]);
1308 
1309 	return ret;
1310 }
1311 
1312 /**
1313  * bio_iov_iter_get_pages - add user or kernel pages to a bio
1314  * @bio: bio to add pages to
1315  * @iter: iov iterator describing the region to be added
1316  *
1317  * This takes either an iterator pointing to user memory, or one pointing to
1318  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1319  * map them into the kernel. On IO completion, the caller should put those
1320  * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1321  * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1322  * to ensure the bvecs and pages stay referenced until the submitted I/O is
1323  * completed by a call to ->ki_complete() or returns with an error other than
1324  * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1325  * on IO completion. If it isn't, then pages should be released.
1326  *
1327  * The function tries, but does not guarantee, to pin as many pages as
1328  * fit into the bio, or are requested in @iter, whatever is smaller. If
1329  * MM encounters an error pinning the requested pages, it stops. Error
1330  * is returned only if 0 pages could be pinned.
1331  */
1332 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1333 {
1334 	int ret = 0;
1335 
1336 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1337 		return -EIO;
1338 
1339 	if (iov_iter_is_bvec(iter)) {
1340 		bio_iov_bvec_set(bio, iter);
1341 		iov_iter_advance(iter, bio->bi_iter.bi_size);
1342 		return 0;
1343 	}
1344 
1345 	if (iov_iter_extract_will_pin(iter))
1346 		bio_set_flag(bio, BIO_PAGE_PINNED);
1347 	do {
1348 		ret = __bio_iov_iter_get_pages(bio, iter);
1349 	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1350 
1351 	return bio->bi_vcnt ? 0 : ret;
1352 }
1353 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
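
/*
 * Illustrative sketch (editorial addition): a simplified direct-I/O style
 * step built on bio_iov_iter_get_pages().  Error handling and the completion
 * path (which is expected to release the pinned pages, e.g. via
 * bio_release_pages()) are elided; all names are hypothetical.
 */
static int my_dio_fill_and_submit(struct bio *bio, struct iov_iter *iter)
{
	int ret;

	ret = bio_iov_iter_get_pages(bio, iter);
	if (ret) {
		bio_put(bio);		/* no pages were added */
		return ret;
	}
	submit_bio(bio);
	return 0;
}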
1354 
1355 static void submit_bio_wait_endio(struct bio *bio)
1356 {
1357 	complete(bio->bi_private);
1358 }
1359 
1360 /**
1361  * submit_bio_wait - submit a bio, and wait until it completes
1362  * @bio: The &struct bio which describes the I/O
1363  *
1364  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1365  * bio_endio() on failure.
1366  *
1367  * WARNING: Unlike how submit_bio() is usually used, this function does not
1368  * consume the bio reference. The caller must drop the reference
1369  * on their own.
1370  */
1371 int submit_bio_wait(struct bio *bio)
1372 {
1373 	DECLARE_COMPLETION_ONSTACK_MAP(done,
1374 			bio->bi_bdev->bd_disk->lockdep_map);
1375 	unsigned long hang_check;
1376 
1377 	bio->bi_private = &done;
1378 	bio->bi_end_io = submit_bio_wait_endio;
1379 	bio->bi_opf |= REQ_SYNC;
1380 	submit_bio(bio);
1381 
1382 	/* Prevent hang_check timer from firing at us during very long I/O */
1383 	hang_check = sysctl_hung_task_timeout_secs;
1384 	if (hang_check)
1385 		while (!wait_for_completion_io_timeout(&done,
1386 					hang_check * (HZ/2)))
1387 			;
1388 	else
1389 		wait_for_completion_io(&done);
1390 
1391 	return blk_status_to_errno(bio->bi_status);
1392 }
1393 EXPORT_SYMBOL(submit_bio_wait);
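
/*
 * Illustrative sketch (editorial addition): issuing a synchronous cache flush
 * with submit_bio_wait().  As the warning above notes, the bio reference is
 * not consumed, so the caller still drops it with bio_put().  The helper name
 * is hypothetical.
 */
static int my_flush_bdev(struct block_device *bdev)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH, GFP_KERNEL);
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}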
1394 
1395 void __bio_advance(struct bio *bio, unsigned bytes)
1396 {
1397 	if (bio_integrity(bio))
1398 		bio_integrity_advance(bio, bytes);
1399 
1400 	bio_crypt_advance(bio, bytes);
1401 	bio_advance_iter(bio, &bio->bi_iter, bytes);
1402 }
1403 EXPORT_SYMBOL(__bio_advance);
1404 
1405 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1406 			struct bio *src, struct bvec_iter *src_iter)
1407 {
1408 	while (src_iter->bi_size && dst_iter->bi_size) {
1409 		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
1410 		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
1411 		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
1412 		void *src_buf = bvec_kmap_local(&src_bv);
1413 		void *dst_buf = bvec_kmap_local(&dst_bv);
1414 
1415 		memcpy(dst_buf, src_buf, bytes);
1416 
1417 		kunmap_local(dst_buf);
1418 		kunmap_local(src_buf);
1419 
1420 		bio_advance_iter_single(src, src_iter, bytes);
1421 		bio_advance_iter_single(dst, dst_iter, bytes);
1422 	}
1423 }
1424 EXPORT_SYMBOL(bio_copy_data_iter);
1425 
1426 /**
1427  * bio_copy_data - copy contents of data buffers from one bio to another
1428  * @src: source bio
1429  * @dst: destination bio
1430  *
1431  * Stops when it reaches the end of either @src or @dst - that is, copies
1432  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1433  */
1434 void bio_copy_data(struct bio *dst, struct bio *src)
1435 {
1436 	struct bvec_iter src_iter = src->bi_iter;
1437 	struct bvec_iter dst_iter = dst->bi_iter;
1438 
1439 	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1440 }
1441 EXPORT_SYMBOL(bio_copy_data);
1442 
1443 void bio_free_pages(struct bio *bio)
1444 {
1445 	struct bio_vec *bvec;
1446 	struct bvec_iter_all iter_all;
1447 
1448 	bio_for_each_segment_all(bvec, bio, iter_all)
1449 		__free_page(bvec->bv_page);
1450 }
1451 EXPORT_SYMBOL(bio_free_pages);
1452 
1453 /*
1454  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1455  * for performing direct-IO in BIOs.
1456  *
1457  * The problem is that we cannot run folio_mark_dirty() from interrupt context
1458  * because the required locks are not interrupt-safe.  So what we can do is to
1459  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1460  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1461  * in process context.
1462  *
1463  * Note that this code is very hard to test under normal circumstances because
1464  * direct-io pins the pages with get_user_pages().  This makes
1465  * is_page_cache_freeable return false, and the VM will not clean the pages.
1466  * But other code (eg, flusher threads) could clean the pages if they are mapped
1467  * pagecache.
1468  *
1469  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1470  * deferred bio dirtying paths.
1471  */
1472 
1473 /*
1474  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1475  */
1476 void bio_set_pages_dirty(struct bio *bio)
1477 {
1478 	struct folio_iter fi;
1479 
1480 	bio_for_each_folio_all(fi, bio) {
1481 		folio_lock(fi.folio);
1482 		folio_mark_dirty(fi.folio);
1483 		folio_unlock(fi.folio);
1484 	}
1485 }
1486 EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
1487 
1488 /*
1489  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1490  * If they are, then fine.  If, however, some pages are clean then they must
1491  * have been written out during the direct-IO read.  So we take another ref on
1492  * the BIO and re-dirty the pages in process context.
1493  *
1494  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1495  * here on.  It will unpin each page and will run one bio_put() against the
1496  * BIO.
1497  */
1498 
1499 static void bio_dirty_fn(struct work_struct *work);
1500 
1501 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1502 static DEFINE_SPINLOCK(bio_dirty_lock);
1503 static struct bio *bio_dirty_list;
1504 
1505 /*
1506  * This runs in process context
1507  */
1508 static void bio_dirty_fn(struct work_struct *work)
1509 {
1510 	struct bio *bio, *next;
1511 
1512 	spin_lock_irq(&bio_dirty_lock);
1513 	next = bio_dirty_list;
1514 	bio_dirty_list = NULL;
1515 	spin_unlock_irq(&bio_dirty_lock);
1516 
1517 	while ((bio = next) != NULL) {
1518 		next = bio->bi_private;
1519 
1520 		bio_release_pages(bio, true);
1521 		bio_put(bio);
1522 	}
1523 }
1524 
1525 void bio_check_pages_dirty(struct bio *bio)
1526 {
1527 	struct folio_iter fi;
1528 	unsigned long flags;
1529 
1530 	bio_for_each_folio_all(fi, bio) {
1531 		if (!folio_test_dirty(fi.folio))
1532 			goto defer;
1533 	}
1534 
1535 	bio_release_pages(bio, false);
1536 	bio_put(bio);
1537 	return;
1538 defer:
1539 	spin_lock_irqsave(&bio_dirty_lock, flags);
1540 	bio->bi_private = bio_dirty_list;
1541 	bio_dirty_list = bio;
1542 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1543 	schedule_work(&bio_dirty_work);
1544 }
1545 EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
1546 
1547 static inline bool bio_remaining_done(struct bio *bio)
1548 {
1549 	/*
1550 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1551 	 * we always end io on the first invocation.
1552 	 */
1553 	if (!bio_flagged(bio, BIO_CHAIN))
1554 		return true;
1555 
1556 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1557 
1558 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1559 		bio_clear_flag(bio, BIO_CHAIN);
1560 		return true;
1561 	}
1562 
1563 	return false;
1564 }
1565 
1566 /**
1567  * bio_endio - end I/O on a bio
1568  * @bio:	bio
1569  *
1570  * Description:
1571  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1572  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1573  *   bio unless they own it and thus know that it has an end_io function.
1574  *
1575  *   bio_endio() can be called several times on a bio that has been chained
1576  *   using bio_chain().  The ->bi_end_io() function will only be called the
1577  *   last time.
1578  **/
1579 void bio_endio(struct bio *bio)
1580 {
1581 again:
1582 	if (!bio_remaining_done(bio))
1583 		return;
1584 	if (!bio_integrity_endio(bio))
1585 		return;
1586 
1587 	rq_qos_done_bio(bio);
1588 
1589 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1590 		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
1591 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1592 	}
1593 
1594 	/*
1595 	 * Need to have a real endio function for chained bios, otherwise
1596 	 * various corner cases will break (like stacking block devices that
1597 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1598 	 * recursion and blowing the stack. Tail call optimization would
1599 	 * handle this, but compiling with frame pointers also disables
1600 	 * gcc's sibling call optimization.
1601 	 */
1602 	if (bio->bi_end_io == bio_chain_endio) {
1603 		bio = __bio_chain_endio(bio);
1604 		goto again;
1605 	}
1606 
1607 	blk_throtl_bio_endio(bio);
1608 	/* release cgroup info */
1609 	bio_uninit(bio);
1610 	if (bio->bi_end_io)
1611 		bio->bi_end_io(bio);
1612 }
1613 EXPORT_SYMBOL(bio_endio);
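
/*
 * Illustrative sketch (editorial addition): how a bio-based driver typically
 * completes a request - record the status and call bio_endio(), which handles
 * chaining and the final ->bi_end_io() as described above.  The helper name
 * is hypothetical.
 */
static void my_complete_bio(struct bio *bio, blk_status_t status)
{
	if (status)
		bio->bi_status = status;
	bio_endio(bio);
}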
1614 
1615 /**
1616  * bio_split - split a bio
1617  * @bio:	bio to split
1618  * @sectors:	number of sectors to split from the front of @bio
1619  * @gfp:	gfp mask
1620  * @bs:		bio set to allocate from
1621  *
1622  * Allocates and returns a new bio which represents @sectors from the start of
1623  * @bio, and updates @bio to represent the remaining sectors.
1624  *
1625  * Unless this is a discard request the newly allocated bio will point
1626  * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1627  * neither @bio nor @bs are freed before the split bio.
1628  * neither @bio nor @bs is freed before the split bio.
1629 struct bio *bio_split(struct bio *bio, int sectors,
1630 		      gfp_t gfp, struct bio_set *bs)
1631 {
1632 	struct bio *split;
1633 
1634 	BUG_ON(sectors <= 0);
1635 	BUG_ON(sectors >= bio_sectors(bio));
1636 
1637 	/* Zone append commands cannot be split */
1638 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1639 		return NULL;
1640 
1641 	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
1642 	if (!split)
1643 		return NULL;
1644 
1645 	split->bi_iter.bi_size = sectors << 9;
1646 
1647 	if (bio_integrity(split))
1648 		bio_integrity_trim(split);
1649 
1650 	bio_advance(bio, split->bi_iter.bi_size);
1651 
1652 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1653 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1654 
1655 	return split;
1656 }
1657 EXPORT_SYMBOL(bio_split);
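
/*
 * Illustrative sketch (editorial addition): the common split-and-chain
 * pattern used by stacking drivers - handle the first @sectors here and let
 * the remainder be resubmitted.  The bio_set and helper name are
 * hypothetical.
 */
static struct bio *my_split_front(struct bio *bio, int sectors,
				  struct bio_set *bs)
{
	struct bio *split;

	split = bio_split(bio, sectors, GFP_NOIO, bs);
	if (!split)
		return NULL;
	bio_chain(split, bio);		/* parent completes after both finish */
	submit_bio_noacct(bio);		/* requeue the remainder */
	return split;			/* caller processes the front part */
}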
1658 
1659 /**
1660  * bio_trim - trim a bio
1661  * @bio:	bio to trim
1662  * @offset:	number of sectors to trim from the front of @bio
1663  * @size:	size we want to trim @bio to, in sectors
1664  *
1665  * This function is typically used for bios that are cloned and submitted
1666  * to the underlying device in parts.
1667  */
1668 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1669 {
1670 	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
1671 			 offset + size > bio_sectors(bio)))
1672 		return;
1673 
1674 	size <<= 9;
1675 	if (offset == 0 && size == bio->bi_iter.bi_size)
1676 		return;
1677 
1678 	bio_advance(bio, offset << 9);
1679 	bio->bi_iter.bi_size = size;
1680 
1681 	if (bio_integrity(bio))
1682 		bio_integrity_trim(bio);
1683 }
1684 EXPORT_SYMBOL_GPL(bio_trim);
1685 
1686 /*
1687  * create memory pools for biovec's in a bio_set.
1688  * use the global biovec slabs created for general use.
1689  */
1690 int biovec_init_pool(mempool_t *pool, int pool_entries)
1691 {
1692 	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1693 
1694 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1695 }
1696 
1697 /*
1698  * bioset_exit - exit a bioset initialized with bioset_init()
1699  *
1700  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1701  * kzalloc()).
1702  */
1703 void bioset_exit(struct bio_set *bs)
1704 {
1705 	bio_alloc_cache_destroy(bs);
1706 	if (bs->rescue_workqueue)
1707 		destroy_workqueue(bs->rescue_workqueue);
1708 	bs->rescue_workqueue = NULL;
1709 
1710 	mempool_exit(&bs->bio_pool);
1711 	mempool_exit(&bs->bvec_pool);
1712 
1713 	bioset_integrity_free(bs);
1714 	if (bs->bio_slab)
1715 		bio_put_slab(bs);
1716 	bs->bio_slab = NULL;
1717 }
1718 EXPORT_SYMBOL(bioset_exit);
1719 
1720 /**
1721  * bioset_init - Initialize a bio_set
1722  * @bs:		pool to initialize
1723  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1724  * @front_pad:	Number of bytes to allocate in front of the returned bio
1725  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1726  *              and %BIOSET_NEED_RESCUER
1727  *
1728  * Description:
1729  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1730  *    to ask for a number of bytes to be allocated in front of the bio.
1731  *    Front pad allocation is useful for embedding the bio inside
1732  *    another structure, to avoid allocating extra data to go with the bio.
1733  *    Note that the bio must be embedded at the END of that structure always,
1734  *    or things will break badly.
1735  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1736  *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
1737  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
1738  *    to dispatch queued requests when the mempool runs out of space.
1739  *
1740  */
1741 int bioset_init(struct bio_set *bs,
1742 		unsigned int pool_size,
1743 		unsigned int front_pad,
1744 		int flags)
1745 {
1746 	bs->front_pad = front_pad;
1747 	if (flags & BIOSET_NEED_BVECS)
1748 		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1749 	else
1750 		bs->back_pad = 0;
1751 
1752 	spin_lock_init(&bs->rescue_lock);
1753 	bio_list_init(&bs->rescue_list);
1754 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1755 
1756 	bs->bio_slab = bio_find_or_create_slab(bs);
1757 	if (!bs->bio_slab)
1758 		return -ENOMEM;
1759 
1760 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1761 		goto bad;
1762 
1763 	if ((flags & BIOSET_NEED_BVECS) &&
1764 	    biovec_init_pool(&bs->bvec_pool, pool_size))
1765 		goto bad;
1766 
1767 	if (flags & BIOSET_NEED_RESCUER) {
1768 		bs->rescue_workqueue = alloc_workqueue("bioset",
1769 							WQ_MEM_RECLAIM, 0);
1770 		if (!bs->rescue_workqueue)
1771 			goto bad;
1772 	}
1773 	if (flags & BIOSET_PERCPU_CACHE) {
1774 		bs->cache = alloc_percpu(struct bio_alloc_cache);
1775 		if (!bs->cache)
1776 			goto bad;
1777 		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
1778 	}
1779 
1780 	return 0;
1781 bad:
1782 	bioset_exit(bs);
1783 	return -ENOMEM;
1784 }
1785 EXPORT_SYMBOL(bioset_init);
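
/*
 * Illustrative sketch (editorial addition): a driver-private bio_set that
 * uses front_pad to embed per-bio state, as the description above suggests.
 * struct my_drv_bio, my_bs and the setup helper are hypothetical; note that
 * the bio must be the last member of the containing structure.
 */
struct my_drv_bio {
	void		*my_private;	/* driver state lives in the front pad */
	struct bio	bio;		/* must be the last member */
};

static struct bio_set my_bs;

static int my_driver_bioset_setup(void)
{
	return bioset_init(&my_bs, BIO_POOL_SIZE,
			   offsetof(struct my_drv_bio, bio),
			   BIOSET_NEED_BVECS);
}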
1786 
1787 static int __init init_bio(void)
1788 {
1789 	int i;
1790 
1791 	BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
1792 
1793 	bio_integrity_init();
1794 
1795 	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1796 		struct biovec_slab *bvs = bvec_slabs + i;
1797 
1798 		bvs->slab = kmem_cache_create(bvs->name,
1799 				bvs->nr_vecs * sizeof(struct bio_vec), 0,
1800 				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1801 	}
1802 
1803 	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1804 					bio_cpu_dead);
1805 
1806 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
1807 			BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
1808 		panic("bio: can't allocate bios\n");
1809 
1810 	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
1811 		panic("bio: can't create integrity pool\n");
1812 
1813 	return 0;
1814 }
1815 subsys_initcall(init_bio);
1816