/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

struct bio_set;
struct bio;
struct bio_integrity_payload;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	sector_t		bi_sector;	/* device address in 512 byte
						   sectors */
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */

	unsigned short		bi_vcnt;	/* how many bio_vec's */
	unsigned short		bi_idx;		/* current index into bvl_vec */

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	unsigned int		bi_size;	/* residual I/O count */

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */

	unsigned int		bi_comp_cpu;	/* completion CPU */

	atomic_t		bi_cnt;		/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity;  /* data integrity */
#endif

	bio_destructor_t	*bi_destructor;	/* destructor */

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

/*
 * bio flags
 */
#define BIO_UPTODATE	0	/* ok after I/O completion */
#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
#define BIO_EOF		2	/* out-of-bounds error */
#define BIO_SEG_VALID	3	/* bi_phys_segments valid */
#define BIO_CLONED	4	/* doesn't own data */
#define BIO_BOUNCED	5	/* bio is a bounce bio */
#define BIO_USER_MAPPED 6	/* contains user pages */
#define BIO_EOPNOTSUPP	7	/* not supported */
#define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
#define BIO_NULL_MAPPED 9	/* contains invalid user pages */
#define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
#define BIO_QUIET	11	/* Make BIO Quiet */
#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
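
/*
 * Example (illustrative sketch, not part of the original header): a
 * completion callback testing a status flag.  The callback name and its
 * error handling are assumptions for illustration only.
 *
 *	static void my_end_io(struct bio *bio, int error)
 *	{
 *		if (!bio_flagged(bio, BIO_UPTODATE))
 *			printk(KERN_ERR "bio failed: %d\n", error);
 *		bio_put(bio);
 *	}
 */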

/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)

/*
 * bio bi_rw flags
 *
 * bit 0 -- data direction
 *	If not set, bio is a read from device. If set, it's a write to device.
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 *	Insert a serialization point in the IO queue, forcing previously
 *	submitted IO to be completed before this one is issued.
 * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
 *	Note that this does NOT indicate that the IO itself is sync, just
 *	that the block layer will not postpone issue of this IO by plugging.
 * bit 4 -- metadata request
 *	Used for tracing to differentiate metadata and data IO. May also
 *	get some preferential treatment in the IO scheduler
 * bit 5 -- discard sectors
 *	Informs the lower level device that this range of sectors is no longer
 *	used by the file system and may thus be freed by the device. Used
 *	for flash based storage.
 * bit 6 -- fail fast device errors
 * bit 7 -- fail fast transport errors
 * bit 8 -- fail fast driver errors
 *	Don't want driver retries for any fast fail whatever the reason.
 */
#define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER	2
#define BIO_RW_SYNCIO	3
#define BIO_RW_UNPLUG	4
#define BIO_RW_META	5
#define BIO_RW_DISCARD	6
#define BIO_RW_FAILFAST_DEV		7
#define BIO_RW_FAILFAST_TRANSPORT	8
#define BIO_RW_FAILFAST_DRIVER		9

#define bio_rw_flagged(bio, flag)	((bio)->bi_rw & (1 << (flag)))

/*
 * Old defines, these should eventually be replaced by direct usage of
 * bio_rw_flagged()
 */
#define bio_barrier(bio)	bio_rw_flagged(bio, BIO_RW_BARRIER)
#define bio_sync(bio)		bio_rw_flagged(bio, BIO_RW_SYNCIO)
#define bio_unplug(bio)		bio_rw_flagged(bio, BIO_RW_UNPLUG)
#define bio_failfast_dev(bio)	bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV)
#define bio_failfast_transport(bio)	\
		bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT)
#define bio_failfast_driver(bio)	\
		bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER)
#define bio_rw_ahead(bio)	bio_rw_flagged(bio, BIO_RW_AHEAD)
#define bio_rw_meta(bio)	bio_rw_flagged(bio, BIO_RW_META)
#define bio_discard(bio)	bio_rw_flagged(bio, BIO_RW_DISCARD)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {			\
	WARN_ON(prio >= (1 << IOPRIO_BITS));			\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);		\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)
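
/*
 * Example (illustrative): tagging a bio with a best-effort class
 * priority before submission.  IOPRIO_PRIO_VALUE() and IOPRIO_CLASS_BE
 * come from linux/ioprio.h, which is already included above.
 *
 *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2));
 */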

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))

static inline unsigned int bio_cur_sectors(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len >> 9;
	else /* dataless requests such as discard */
		return bio->bi_size >> 9;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio->bi_vcnt)
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline int bio_has_allocated_vec(struct bio *bio)
{
	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
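
/*
 * Example (sketch): copying one segment out of a bio via a temporary
 * atomic mapping.  'dst' and 'idx' are assumed to exist in the caller;
 * nothing between the map and the unmap may sleep.
 *
 *	char *src = __bio_kmap_atomic(bio, idx, KM_USER0);
 *	memcpy(dst, src, bio_iovec_idx(bio, idx)->bv_len);
 *	__bio_kunmap_atomic(src, KM_USER0);
 */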

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
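
/*
 * Example (sketch, roughly mirroring what the request merging code
 * does): two bios can share one hardware segment only if their
 * adjoining vecs are physically contiguous and the combined range does
 * not straddle the queue's segment boundary.
 *
 *	if (BIOVEC_PHYS_MERGEABLE(__BVEC_END(prev), __BVEC_START(next)) &&
 *	    BIO_SEG_BOUNDARY(q, prev, next))
 *		... treat prev and next as one segment ...
 */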

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
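
/*
 * Example (sketch): zeroing the pending segments of a bio with the
 * iterator above; this is roughly what zero_fill_bio() does.
 *
 *	struct bio_vec *bvec;
 *	unsigned long flags;
 *	int i;
 *
 *	bio_for_each_segment(bvec, bio, i) {
 *		char *data = bvec_kmap_irq(bvec, &flags);
 *		memset(data, 0, bvec->bv_len);
 *		bvec_kunmap_irq(data, &flags);
 *	}
 */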

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would already be freed memory by the time the
 * if (bio->bi_flags ...) check runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */
	struct bio_vec		*bip_vec;	/* integrity data vector */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned int		bip_size;

	unsigned short		bip_pool;	/* pool the ivec came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 *   in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;
	int				error;
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
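
/*
 * Example (illustrative): splitting a single-page bio that straddles a
 * chunk boundary, in the style of the raid0/linear drivers.
 * 'split_sectors' is a hypothetical value computed by the caller.
 *
 *	struct bio_pair *bp = bio_split(bio, split_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */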

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
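
/*
 * Example (sketch): a driver keeping a private bio_set so that its bio
 * allocations cannot be starved by other users of fs_bio_set.  The pool
 * size of 4 is an arbitrary illustrative choice.
 *
 *	struct bio_set *bs = bioset_create(4, 0);
 *
 *	if (bs)
 *		bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 */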

extern struct bio *bio_alloc(gfp_t, int);
extern struct bio *bio_kmalloc(gfp_t, int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);

extern void bio_init(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
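
/*
 * Example (sketch): the usual allocate/fill/submit sequence built from
 * the helpers above.  'bdev', 'sector', 'page' and 'my_end_io' are
 * assumptions standing in for caller state; submit_bio() is declared in
 * linux/fs.h.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 *		bio_put(bio);
 *		return -EIO;
 *	}
 *	submit_bio(READ, bio);
 */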

/*
 * Allow queuer to specify a completion CPU for this bio
 */
static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
{
	bio->bi_comp_cpu = cpu;
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
#endif
	mempool_t *bvec_pool;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

extern struct bio_set *fs_bio_set;
extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 */
static __always_inline char *bvec_kmap_irq(struct bio_vec *bvec,
		unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static __always_inline void bvec_kunmap_irq(char *buffer,
		unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}

#else
#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
#endif

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)
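
/*
 * Example (illustrative): inspecting the first byte of the current
 * segment from a context that cannot re-enable interrupts between the
 * map and the unmap.
 *
 *	unsigned long flags;
 *	char *buf = bio_kmap_irq(bio, &flags);
 *	first = *buf;
 *	bio_kunmap_irq(buf, &flags);
 */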

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline int bio_has_data(struct bio *bio)
{
	return bio && bio->bi_io_vec != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
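
/*
 * Example (sketch): summing the length of the remaining integrity
 * vectors of a bio; 'total' is assumed to be declared by the caller.
 *
 *	struct bio_vec *iv;
 *	unsigned int i;
 *
 *	bip_for_each_vec(iv, bio->bi_integrity, i)
 *		total += iv->bv_len;
 */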

#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init_slab(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define bio_integrity(a)		(0)
#define bioset_integrity_create(a, b)	(0)
#define bio_integrity_prep(a)		(0)
#define bio_integrity_enabled(a)	(0)
#define bio_integrity_clone(a, b, c, d)	(0)
#define bioset_integrity_free(a)	do { } while (0)
#define bio_integrity_free(a, b)	do { } while (0)
#define bio_integrity_endio(a, b)	do { } while (0)
#define bio_integrity_advance(a, b)	do { } while (0)
#define bio_integrity_trim(a, b, c)	do { } while (0)
#define bio_integrity_split(a, b, c)	do { } while (0)
#define bio_integrity_set_tag(a, b, c)	do { } while (0)
#define bio_integrity_get_tag(a, b, c)	do { } while (0)
#define bio_integrity_init_slab(a)	do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */