/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/android_kabi.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;
struct block_device {
	sector_t		bd_start_sect;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	bool			bd_read_only;	/* read-only policy */
	dev_t			bd_dev;
	int			bd_openers;
	struct inode *		bd_inode;	/* will die */
	struct super_block *	bd_super;
	void *			bd_claiming;
	struct device		bd_device;
	void *			bd_holder;
	int			bd_holders;
	bool			bd_write_holder;
	struct kobject		*bd_holder_dir;
	u8			bd_partno;
	spinlock_t		bd_size_lock; /* for bd_inode->i_size updates */
	struct gendisk *	bd_disk;

	/* The counter of freeze processes */
	int			bd_fsfreeze_count;
	/* Mutex for freeze */
	struct mutex		bd_fsfreeze_mutex;
	struct super_block	*bd_fsfreeze_sb;

	struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
	bool			bd_make_it_fail;
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
} __randomize_layout;

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
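
/*
 * Illustrative sketch (not part of this header): a blk-mq driver's
 * ->queue_rq() typically distinguishes the two "resource" statuses as shown
 * below. The driver and its helpers (my_dev_get_tag(), my_dev_map_data())
 * are hypothetical; only the return codes come from the definitions above.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct my_dev *dev = hctx->queue->queuedata;
 *
 *		if (!my_dev_get_tag(dev))
 *			// Per-device resource: in-flight IO will free it and
 *			// the driver guarantees the queue is rerun.
 *			return BLK_STS_DEV_RESOURCE;
 *
 *		if (!my_dev_map_data(dev, bd->rq))
 *			// System-wide resource (e.g. a DMA mapping): no such
 *			// guarantee, so use BLK_STS_RESOURCE instead.
 *			return BLK_STS_RESOURCE;
 *
 *		return BLK_STS_OK;
 *	}
 */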

/*
 * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
 * related resources are unavailable, but the driver can guarantee the queue
 * will be rerun in the future once the resources become available again.
 *
 * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
 * a zone specific resource and IO to a different zone on the same device could
 * still be served. An example is a write-locked zone, where a read to the same
 * zone could still be served.
 */
#define BLK_STS_ZONE_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
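
/*
 * Illustrative sketch (not part of this header): how a multipath-aware
 * completion handler might use blk_path_error(). The surrounding helpers
 * (my_requeue_on_other_path(), my_end_request()) are hypothetical; only
 * blk_path_error() and blk_status_t come from above.
 *
 *	static void my_complete_rq(struct request *rq, blk_status_t status)
 *	{
 *		if (status != BLK_STS_OK && blk_path_error(status)) {
 *			// The error may be specific to this path, so retry
 *			// on another path instead of failing the request.
 *			my_requeue_on_other_path(rq);
 *			return;
 *		}
 *		my_end_request(rq, status);
 *	}
 */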

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
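
/*
 * Worked example of the packing above (illustrative only): bit 63 is the
 * reserved/throttle bit, bits 62..51 hold the truncated bio size in sectors
 * and bits 50..0 hold the truncated issue time in nanoseconds.
 *
 *	struct bio_issue issue = { .value = 0 };
 *
 *	bio_issue_init(&issue, 4096 >> 9);	// size in sectors, 8 here
 *	// bio_issue_size(&issue) == 8
 *	// bio_issue_time(&issue) == ktime_get_ns() masked to 51 bits
 *	issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;	// set the reserved bit
 */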

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned int		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
	bool			bi_skip_dm_default_key;
#endif
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	ANDROID_OEM_DATA(1);
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};
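
/*
 * Illustrative sketch (not from this file): the usual life cycle of the
 * fields above from a submitter's point of view. submit_bio() and bio_put()
 * live in <linux/bio.h> / block core, not in this header, and my_ctx /
 * my_endio() are hypothetical.
 *
 *	static void my_endio(struct bio *bio)
 *	{
 *		struct my_ctx *ctx = bio->bi_private;
 *
 *		if (bio->bi_status != BLK_STS_OK)
 *			my_ctx_record_error(ctx, bio->bi_status);
 *		bio_put(bio);
 *	}
 *
 *	// submitter side:
 *	//	bio->bi_end_io  = my_endio;
 *	//	bio->bi_private = ctx;
 *	//	bio->bi_opf     = REQ_OP_READ;
 *	//	submit_bio(bio);
 */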

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put/release bvec pages */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* bio went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
	BIO_PERCPU_CACHE,	/* can participate in per-cpu alloc cache */
	BIO_FLAG_LAST
};
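
/*
 * Illustrative note: these values are bit numbers in bio->bi_flags, not
 * masks. Block core provides bio_flagged()/bio_set_flag() helpers in
 * <linux/bio.h>; open-coded, a test looks like this:
 *
 *	if (bio->bi_flags & (1U << BIO_CLONED))
 *		;	// bio does not own its data pages
 */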

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= 13,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 15,
	/* reset all zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 17,

	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};
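
/*
 * Illustrative note: per the encoding described above, every operation that
 * transfers data to the device has an odd opcode, which is what makes the
 * op_is_write() check below a single bit test. For example:
 *
 *	op_is_write(REQ_OP_WRITE)	// 1  -> true
 *	op_is_write(REQ_OP_DISCARD)	// 3  -> true
 *	op_is_write(REQ_OP_READ)	// 0  -> false
 *	op_is_write(REQ_OP_DRV_IN)	// 34 -> false (data FROM the device)
 */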

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,           /* Don't wait if request will block */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup.  CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.  (A usage sketch
	 * follows the REQ_* masks below.)
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
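
/*
 * Illustrative sketch (referenced from the __REQ_CGROUP_PUNT comment above):
 * the opcode and the flag bits share the same bi_opf/cmd_flags word, so they
 * are simply OR'ed together. The writeback-style caller shown here is
 * hypothetical; only the REQ_* values come from this header.
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_CGROUP_PUNT;
 *	submit_bio(bio);	// issuing is punted to a per-blkcg worker
 *
 *	// Checking a flag group works the same way:
 *	if (bio->bi_opf & REQ_FAILFAST_MASK)
 *		;	// no driver-level retries are wanted
 */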

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)
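
/*
 * Illustrative sketch: because the opcode occupies the low REQ_OP_BITS of
 * bi_opf, bio_op() can be switched on directly no matter which REQ_* flags
 * are also set. The handler names are hypothetical.
 *
 *	switch (bio_op(bio)) {
 *	case REQ_OP_READ:
 *	case REQ_OP_WRITE:
 *		return my_handle_rw(bio);
 *	case REQ_OP_DISCARD:
 *		return my_handle_discard(bio);
 *	default:
 *		return BLK_STS_NOTSUPP;
 *	}
 */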

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
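
/*
 * Illustrative examples of the rule above:
 *
 *	op_is_sync(REQ_OP_READ)			// true, reads are always sync
 *	op_is_sync(REQ_OP_WRITE)		// false
 *	op_is_sync(REQ_OP_WRITE | REQ_SYNC)	// true
 *	op_is_sync(REQ_OP_WRITE | REQ_FUA)	// true
 */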

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
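	/* op_is_write() is 0 or 1, matching STAT_READ / STAT_WRITE */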
	return op_is_write(op);
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
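
/*
 * Illustrative layout of the polling cookie above: bit 31 marks an internal
 * (reserved) tag, bits 30..16 carry the hardware queue number and bits 15..0
 * carry the tag. For example, a cookie built as
 *
 *	blk_qc_t cookie = (hwq_num << BLK_QC_T_SHIFT) | tag;
 *
 * satisfies blk_qc_t_to_queue_num(cookie) == hwq_num and
 * blk_qc_t_to_tag(cookie) == tag, while BLK_QC_T_NONE means "no cookie".
 */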

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */