/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/android_kabi.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

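/*
 * Example (illustrative, not part of this header): converting between
 * byte and sector units with the constants above. "byte_offset" and
 * "nr_sects" are hypothetical variables.
 *
 *	sector_t first_sect = byte_offset >> SECTOR_SHIFT;
 *	u64 nr_bytes = (u64)nr_sects << SECTOR_SHIFT;
 *
 * PAGE_SECTORS is the number of sectors per page, e.g. 8 for a 4 KiB
 * PAGE_SIZE.
 */
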
struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	bool			bd_read_only;	/* read-only policy */
	dev_t			bd_dev;
	atomic_t		bd_openers;
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	void *			bd_claiming;
	struct device		bd_device;
	void *			bd_holder;
	int			bd_holders;
	bool			bd_write_holder;
	struct kobject		*bd_holder_dir;
	u8			bd_partno;
	spinlock_t		bd_size_lock; /* for bd_inode->i_size updates */
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
	struct super_block	*bd_fsfreeze_sb;

	struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
	bool			bd_make_it_fail;
#endif
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))

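/*
 * Example (illustrative): recovering the block_device from its embedded
 * struct device, as a driver-core callback might. "dev" is a hypothetical
 * struct device pointer known to be embedded in a block_device.
 *
 *	struct block_device *bdev = dev_to_bdev(dev);
 *	sector_t capacity = bdev->bd_nr_sectors;
 *
 * bdev_whole() resolves a partition's block_device to the whole-disk
 * device via its gendisk.
 */
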
/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource, and IO to a different zone on the same device can
 * still be served. An example is a write-locked zone: writes to it must wait,
 * but a read to the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)17)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}

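/*
 * Example (illustrative): a multipath-style completion handler might use
 * blk_path_error() to decide between failing over and failing the IO.
 * "retry_on_other_path" and "fail_request" are hypothetical helpers.
 *
 *	static void complete_rq(struct request *rq, blk_status_t status)
 *	{
 *		if (status && blk_path_error(status))
 *			retry_on_other_path(rq);
 *		else
 *			fail_request(rq, status);
 *	}
 */
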
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

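/*
 * Example (illustrative): the 64-bit layout packs reserved bit, size and
 * time as [63 | 62..51 | 50..0]. A round trip through the helpers above:
 *
 *	struct bio_issue issue = { .value = 0 };
 *
 *	bio_issue_init(&issue, 4096 >> SECTOR_SHIFT);
 *	u64 t = bio_issue_time(&issue);		// issue time, low 51 bits of ktime
 *	sector_t s = bio_issue_size(&issue);	// 8 sectors
 *
 * Sizes of 2^12 sectors or more are truncated by the size mask.
 */
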
typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	blk_qc_t		bi_cookie;
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
	bool			bi_skip_dm_default_key;
#endif
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	ANDROID_OEM_DATA(1);
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

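/*
 * Example (illustrative): walking a bio's data with bi_iter. The
 * bio_for_each_segment() helper lives in <linux/bio.h>, not here;
 * "process_page" is a hypothetical function.
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter)
 *		process_page(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
 */
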
#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put vec pages on release */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* bio went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
	BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};
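
/*
 * Example (illustrative): the LSB direction rule above is what makes
 * op_is_write() below a single bit test. REQ_OP_READ (0) and
 * REQ_OP_DRV_IN (34) have the LSB clear and transfer FROM the device;
 * REQ_OP_WRITE (1), REQ_OP_ZONE_APPEND (13) and REQ_OP_DRV_OUT (35)
 * have it set and transfer TO the device.
 */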

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,           /* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */

	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT	(__force blk_opf_t)(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)

#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, enum req_op op,
				    blk_opf_t op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}

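/*
 * Example (illustrative): op_stat_group() maps an operation to its
 * per-disk statistics bucket. The fallthrough return relies on
 * STAT_READ == 0 and STAT_WRITE == 1 matching op_is_write()'s 0/1 result:
 *
 *	op_stat_group(REQ_OP_READ);	// STAT_READ
 *	op_stat_group(REQ_OP_WRITE);	// STAT_WRITE
 *	op_stat_group(REQ_OP_DISCARD);	// STAT_DISCARD
 */
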
struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */