/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>
#include <linux/android_kabi.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
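
/*
 * Illustrative sketch (not part of the original header): a multipath
 * consumer such as dm-multipath or nvme-multipath might use
 * blk_path_error() roughly like this to decide whether a failed request is
 * worth retrying on another path.  handle_failed_rq() and
 * retry_on_other_path() are hypothetical names used only for illustration.
 *
 *	static void handle_failed_rq(struct request *rq, blk_status_t sts)
 *	{
 *		if (blk_path_error(sts))
 *			retry_on_other_path(rq);
 *		else
 *			blk_mq_end_request(rq, sts);
 *	}
 */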

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
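
/*
 * Illustrative layout of bio_issue.value after bio_issue_init(), derived
 * from the masks above (a sketch, not part of the original header):
 *
 *	bit  63      reserved, e.g. BIO_ISSUE_THROTL_SKIP_LATENCY
 *	bits 62..51  original bio size, truncated to 12 bits
 *	bits 50..0   issue time from ktime_get_ns(), truncated to 51 bits
 *
 * For example, a bio of 8 sectors issued at time t would store
 * (8ULL << BIO_ISSUE_SIZE_SHIFT) | (t & BIO_ISSUE_TIME_MASK), with the
 * reserved bit 63 carried over from the previous value.
 */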

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req flags. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;

	struct bvec_iter	bi_iter;

	atomic_t		__bi_remaining;
	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
	bool			bi_skip_dm_default_key;
#endif
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put pages on bio release */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_USER_MAPPED,	/* contains user pages */
	BIO_NULL_MAPPED,	/* contains invalid user pages */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_QUEUE_ENTERED,	/* can use blk_queue_enter_live() */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools; the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
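
/*
 * Illustrative sketch derived from the macros above (not part of the
 * original header): because the stored value is the pool index plus one, a
 * bio whose bvecs came from pool 2 carries (2 + 1) << BVEC_POOL_OFFSET in
 * bi_flags, BVEC_POOL_IDX() returns 3 for it, and a return value of 0
 * means there are no bvecs to free.
 */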

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24
enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 8,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,           /* Don't wait if request will block */
	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_NOWAIT_INLINE	(1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}
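
/*
 * Illustrative sketch (not part of the original header): since the helper
 * above is obsolete, new code is expected to assign bi_opf directly, e.g.
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 */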

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}
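
/*
 * Illustrative examples of the LSB direction convention described above (a
 * sketch, not part of the original header): op_is_write(REQ_OP_READ) is
 * false, while op_is_write(REQ_OP_WRITE), op_is_write(REQ_OP_DISCARD) and
 * op_is_write(REQ_OP_WRITE_ZEROES) are all true, because those opcodes are
 * odd.
 */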

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
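
/*
 * Illustrative sketch (derived from the definitions above, not part of the
 * original header): op_stat_group(REQ_OP_READ) yields STAT_READ,
 * op_stat_group(REQ_OP_WRITE) yields STAT_WRITE because op_is_write()
 * evaluates to 1, and op_stat_group(REQ_OP_DISCARD) yields STAT_DISCARD.
 */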

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_EAGAIN		-2U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
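
/*
 * Illustrative sketch of the cookie layout implied by the helpers above
 * (not part of the original header): the block layer packs the hardware
 * queue number above BLK_QC_T_SHIFT and the tag in the low 16 bits, with
 * BLK_QC_T_INTERNAL marking internal tags, roughly
 *
 *	blk_qc_t cookie = (queue_num << BLK_QC_T_SHIFT) | tag;
 *
 * so that blk_qc_t_to_queue_num(cookie) == queue_num and
 * blk_qc_t_to_tag(cookie) == tag.
 */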

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */