/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Include this file directly only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

struct block_device {
	dev_t			bd_dev;
	int			bd_openers;
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	struct mutex		bd_mutex;	/* open/close mutex */
	void *			bd_claiming;
	void *			bd_holder;
	int			bd_holders;
	bool			bd_write_holder;
#ifdef CONFIG_SYSFS
	struct list_head	bd_holder_disks;
#endif
	struct block_device *	bd_contains;
	u8			bd_partno;
	struct hd_struct *	bd_part;
	/* number of times partitions within this device have been opened. */
	unsigned		bd_part_count;

	spinlock_t		bd_size_lock; /* for bd_inode->i_size updates */
	struct gendisk *	bd_disk;
	struct backing_dev_info *bd_bdi;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
} __randomize_layout;

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
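
/*
 * Illustrative sketch (not part of this header): a driver's ->queue_rq()
 * might report the condition above roughly as
 *
 *	if (dev_alloc_tag(dev) < 0)
 *		return BLK_STS_DEV_RESOURCE;
 *
 * where dev_alloc_tag() is a hypothetical per-device resource allocator
 * whose units are freed again as in-flight IO completes.
 */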

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a write-locked zone: writes to it must wait,
 * but a read of the same zone can still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
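
/*
 * Example use (an illustrative sketch modeled on multipath failover, not
 * code from this header): a completion handler could decide between
 * failing over and completing with the error, e.g.
 *
 *	if (blk_path_error(status))
 *		resubmit_on_other_path(req);
 *	else
 *		end_request(req, status);
 *
 * where both helpers are hypothetical.
 */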

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))
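
/*
 * Worked example of the layout above: BIO_ISSUE_RES_SHIFT evaluates to 63
 * and BIO_ISSUE_SIZE_SHIFT to 51, so bit 63 is the reserved bit, bits
 * 62..51 hold the original bio size, and bits 50..0 hold the issue time.
 */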
/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}
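
/* Record the current time and size, preserving the reserved bit. */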
static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req flags. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};
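
/*
 * Example of the "use accessors" rule for ->bi_opf: read the operation
 * with bio_op() (defined below) rather than masking by hand, e.g.
 *
 *	if (bio_op(bio) == REQ_OP_WRITE)
 *		...
 */
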
#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put pages when releasing the bio */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* don't report I/O errors */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
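
/*
 * Example: for a bio whose bvecs came from pool 2, the value 3 (index
 * plus one) is stored in the top three bits of ->bi_flags, so
 * BVEC_POOL_IDX() returns 3 and 0 keeps meaning "no bvecs to free".
 */
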
/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
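/*
 * For example, REQ_OP_WRITE (1) and REQ_OP_WRITE_ZEROES (9) are odd and
 * transfer data to the device, while REQ_OP_READ (0) is even and
 * transfers data from the device; op_is_write() below relies on this.
 */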
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= 13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 15,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 17,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,           /* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */
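
	/* high priority request, poll for completion */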
	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}
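
/* Map an op to its stat group; relies on STAT_READ == 0 and STAT_WRITE == 1. */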
static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)
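
/*
 * Cookie layout, as decoded by the helpers below: bit 31 marks an internal
 * tag, bits 30..16 hold the hardware queue number, and bits 15..0 the tag.
 */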
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
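
/* Request completion statistics, as collected by blk-stat (times in ns). */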
struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */