/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>
#include <linux/android_kabi.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

struct block_device {
	dev_t			bd_dev;
	int			bd_openers;
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	struct mutex		bd_mutex;	/* open/close mutex */
	void *			bd_claiming;
	void *			bd_holder;
	int			bd_holders;
	bool			bd_write_holder;
#ifdef CONFIG_SYSFS
	struct list_head	bd_holder_disks;
#endif
	struct block_device *	bd_contains;
	u8			bd_partno;
	struct hd_struct *	bd_part;
	/* number of times partitions within this device have been opened. */
	unsigned		bd_part_count;

	spinlock_t		bd_size_lock; /* for bd_inode->i_size updates */
	struct gendisk *	bd_disk;
	struct backing_dev_info *bd_bdi;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
	struct super_block	*bd_fsfreeze_sb;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
} __randomize_layout;

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails to allocate these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a write-locked zone: writes to it must wait,
 * but a read to the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
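
/*
 * Illustrative sketch (not part of the upstream header): how the helper
 * above classifies a few of the status codes defined earlier.  Media and
 * target errors are final, while transport-level failures may succeed if
 * retried on another path.
 */
static inline void blk_path_error_example(void)
{
	bool medium    = blk_path_error(BLK_STS_MEDIUM);	/* false: the data itself is bad */
	bool transport = blk_path_error(BLK_STS_TRANSPORT);	/* true: a failover path may succeed */
	bool ioerr     = blk_path_error(BLK_STS_IOERR);		/* true: generic errors are retried */

	(void)medium; (void)transport; (void)ioerr;
}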

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
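
/*
 * Illustrative sketch (not part of the upstream header): packing and
 * unpacking a bio_issue value with the helpers above.  The issue time is
 * the low 51 bits of ktime_get_ns() and the size lives in bits 51..62.
 */
static inline void bio_issue_example(void)
{
	struct bio_issue issue = { .value = 0 };

	bio_issue_init(&issue, 8);	/* record "now" and a size of 8 sectors */

	(void)bio_issue_time(&issue);	/* truncated issue time in nanoseconds */
	(void)bio_issue_size(&issue);	/* 8, read back from bits 51..62 */
}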

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes directly to the device, it will not have a blkg as it
	 * will not have a request_queue associated with it.  The reference is
	 * put on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
	bool			bi_skip_dm_default_key;
#endif
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put/release vec pages */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
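
/*
 * Worked example (illustrative, not part of the upstream header): with
 * BVEC_POOL_BITS == 3, BVEC_POOL_OFFSET is 13 and the pool index occupies
 * bits 13..15 of bi_flags.  A bio whose bvecs came from pool 2 stores
 * 2 + 1 == 3 there (bi_flags |= 3 << 13), so BVEC_POOL_IDX() returns 3;
 * a value of 0 means there are no bvecs to free.
 */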

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= 13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 17,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,           /* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}
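
/*
 * Illustrative sketch (not part of the upstream header): the transfer
 * direction is encoded in the least significant bit of the opcode, so
 * op_is_write() is nothing more than a bit test.
 */
static inline void op_is_write_example(void)
{
	bool rd = op_is_write(REQ_OP_READ);		/* false: opcode 0, bit 0 clear */
	bool wr = op_is_write(REQ_OP_WRITE);		/* true: opcode 1, bit 0 set */
	bool wz = op_is_write(REQ_OP_WRITE_ZEROES);	/* true: opcode 9 is odd */

	(void)rd; (void)wr; (void)wz;
}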

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
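
/*
 * Illustrative sketch (not part of the upstream header): how the flush and
 * sync predicates evaluate for a few common bi_opf combinations.
 */
static inline void op_is_sync_example(void)
{
	bool a = op_is_sync(REQ_OP_READ);			/* true: reads are always sync */
	bool b = op_is_sync(REQ_OP_WRITE);			/* false: a plain write is not sync by default */
	bool c = op_is_sync(REQ_OP_WRITE | REQ_SYNC);		/* true: explicitly marked sync */
	bool d = op_is_flush(REQ_OP_WRITE | REQ_PREFLUSH);	/* true: goes through the flush machinery */

	(void)a; (void)b; (void)c; (void)d;
}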

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
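
/*
 * Illustrative sketch (not part of the upstream header): op_stat_group()
 * maps an operation onto the stat_group index used for per-disk accounting.
 * STAT_READ == 0 and STAT_WRITE == 1, which is why returning op_is_write()
 * for the non-discard case yields the right bucket.
 */
static inline void op_stat_group_example(void)
{
	int rd = op_stat_group(REQ_OP_READ);	/* STAT_READ */
	int wr = op_stat_group(REQ_OP_WRITE);	/* STAT_WRITE */
	int dc = op_stat_group(REQ_OP_DISCARD);	/* STAT_DISCARD */

	(void)rd; (void)wr; (void)dc;
}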

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
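
/*
 * Illustrative sketch (not part of the upstream header): a polling cookie
 * packs the hardware queue number above BLK_QC_T_SHIFT, the tag in the low
 * 16 bits, and the "internal tag" marker in bit 31.
 */
static inline void blk_qc_t_example(void)
{
	blk_qc_t cookie = (2U << BLK_QC_T_SHIFT) | 5U;	/* queue 2, tag 5 */

	(void)blk_qc_t_valid(cookie);		/* true: not BLK_QC_T_NONE */
	(void)blk_qc_t_to_queue_num(cookie);	/* 2 */
	(void)blk_qc_t_to_tag(cookie);		/* 5 */
	(void)blk_qc_t_is_internal(cookie);	/* false: bit 31 is clear */
}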

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */