/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
#include <linux/rw_hint.h>
#include <linux/android_kabi.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)
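/*
 * Illustrative sketch (not part of this header): converting between byte
 * counts and 512-byte sectors with the constants above; the variable names
 * below are made up.
 *
 *	sector_t nr_sects = nr_bytes >> SECTOR_SHIFT;	// bytes -> sectors
 *	u64 len = (u64)nr_sects << SECTOR_SHIFT;	// sectors -> bytes
 *	sector_t page_start = sect & ~(sector_t)SECTOR_MASK; // first sector of the page
 */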

struct block_device {
	sector_t		bd_start_sect;
	sector_t		bd_nr_sectors;
	struct gendisk *	bd_disk;
	struct request_queue *	bd_queue;
	struct disk_stats __percpu *bd_stats;
	unsigned long		bd_stamp;
	atomic_t		__bd_flags;	// partition number + flags
#define BD_PARTNO		255	// lower 8 bits; assign-once
#define BD_READ_ONLY		(1u<<8) // read-only policy
#define BD_WRITE_HOLDER		(1u<<9)
#define BD_HAS_SUBMIT_BIO	(1u<<10)
#define BD_RO_WARNED		(1u<<11)
#ifdef CONFIG_FAIL_MAKE_REQUEST
#define BD_MAKE_IT_FAIL		(1u<<12)
#endif
	dev_t			bd_dev;
	struct address_space	*bd_mapping;	/* page cache */

	atomic_t		bd_openers;
	spinlock_t		bd_size_lock; /* for bd_inode->i_size updates */
	void *			bd_claiming;
	void *			bd_holder;
	const struct blk_holder_ops *bd_holder_ops;
	struct mutex		bd_holder_lock;
	int			bd_holders;
	struct kobject		*bd_holder_dir;

	atomic_t		bd_fsfreeze_count; /* number of freeze requests */
	struct mutex		bd_fsfreeze_mutex; /* serialize freeze/thaw */

	struct partition_meta_info *bd_meta_info;
	int			bd_writers;
#ifdef CONFIG_SECURITY
	void			*bd_security;
#endif
	/*
	 * keep this out-of-line as it's both big and not needed in the fast
	 * path
	 */
	struct device		bd_device;
} __randomize_layout;
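
/*
 * Illustrative sketch (hypothetical direct access; the block core normally
 * goes through its own accessors): the partition number and the BD_* flags
 * above share the single atomic __bd_flags word.
 *
 *	unsigned int v = atomic_read(&bdev->__bd_flags);
 *	u8 partno = v & BD_PARTNO;	// low 8 bits, set once at creation
 *	bool ro = v & BD_READ_ONLY;	// policy flag in the upper bits
 */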

#define bdev_whole(_bdev) \
	((_bdev)->bd_disk->part0)

#define dev_to_bdev(device) \
	container_of((device), struct block_device, bd_device)

#define bdev_kobj(_bdev) \
	(&((_bdev)->bd_device.kobj))

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)

/*
 * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
 * and the bio would block (cf bio_wouldblock_error())
 */
#define BLK_STS_AGAIN		((__force blk_status_t)12)
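
/*
 * Illustrative sketch (not part of this header): a submitter that set
 * REQ_NOWAIT on a bio typically completes it with BLK_STS_AGAIN instead of
 * blocking, along the lines of:
 *
 *	if (bio->bi_opf & REQ_NOWAIT) {
 *		bio->bi_status = BLK_STS_AGAIN;
 *		bio_endio(bio);
 *		return;
 *	}
 */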

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
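
/*
 * Illustrative sketch (hypothetical driver code, helper names made up): a
 * blk-mq ->queue_rq() implementation might map allocation failures along
 * these lines.
 *
 *	if (!my_driver_get_device_tag(rq))
 *		return BLK_STS_DEV_RESOURCE;	// freed by in-flight IO, queue is rerun
 *	if (!my_driver_map_data(rq))
 *		return BLK_STS_RESOURCE;	// system-wide resource, no such guarantee
 */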

/*
 * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently open. The same command should be successful if resubmitted
 * after the number of open zones decreases below the device's limits, which is
 * reported in the request_queue's max_open_zones.
 */
#define BLK_STS_ZONE_OPEN_RESOURCE	((__force blk_status_t)14)

/*
 * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
 * path if the device returns a status indicating that too many zone resources
 * are currently active. The same command should be successful if resubmitted
 * after the number of active zones decreases below the device's limits, which
 * is reported in the request_queue's max_active_zones.
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)15)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)16)

/*
 * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
 * aborted the command because it exceeded one of its Command Duration Limits.
 */
#define BLK_STS_DURATION_LIMIT	((__force blk_status_t)17)

/*
 * Invalid size or alignment.
 */
#define BLK_STS_INVAL	((__force blk_status_t)19)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
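
/*
 * Illustrative sketch (hypothetical helper names): a multipath-style
 * completion handler could use blk_path_error() to decide whether retrying
 * on another path makes sense.
 *
 *	if (blk_path_error(error))
 *		my_retry_on_other_path(rq);	// e.g. transport errors, timeouts
 *	else
 *		my_end_request(rq, error);	// target-side errors won't improve
 */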

struct bio_issue {
	u64 value;
};

typedef __u32 __bitwise blk_opf_t;

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req_flags.
						 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	enum rw_hint		bi_write_hint;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	union {
		/* for polled bios: */
		blk_qc_t		bi_cookie;
		/* for plugged zoned writes only: */
		unsigned int		__bi_nr_segments;
	};
	bio_end_io_t		*bi_end_io;
	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it.  The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
	bool			bi_skip_dm_default_key;
#endif
#endif

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	ANDROID_OEM_DATA(1);
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)
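
/*
 * Illustrative sketch (simplified, error handling omitted): allocating a
 * bio, pointing it at a device and a sector range, and submitting it.
 * bio_alloc(), bio_add_page() and submit_bio() are declared in
 * <linux/bio.h>; my_end_io and my_data are made-up names.
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	bio->bi_private = my_data;
 *	submit_bio(bio);
 */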

/*
 * bio flags
 */
enum {
	BIO_PAGE_PINNED,	/* Unpin pages in bio_release_pages() */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_BPS_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
	BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
	BIO_FLAG_LAST
};

typedef __u32 __bitwise blk_mq_req_flags_t;

#define REQ_OP_BITS	8
#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

/**
 * enum req_op - Operations common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum req_op {
	/* read sectors from the device */
	REQ_OP_READ		= (__force blk_opf_t)0,
	/* write sectors to the device */
	REQ_OP_WRITE		= (__force blk_opf_t)1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= (__force blk_opf_t)2,
	/* discard sectors */
	REQ_OP_DISCARD		= (__force blk_opf_t)3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
	/* write data at the current zone write pointer */
	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= (__force blk_opf_t)13,
	/* reset all the zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)15,

	/* Driver private requests */
	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,

	REQ_OP_LAST		= (__force blk_opf_t)36,
};
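
/*
 * With the encoding above, the operation can be recovered from a combined
 * opf value with REQ_OP_MASK (see bio_op() below), and the data direction
 * from the lowest bit, for example:
 *
 *	(REQ_OP_READ & 1) == 0		// data from the device
 *	(REQ_OP_WRITE & 1) == 1		// data to the device
 *	(REQ_OP_ZONE_APPEND & 1) == 1	// data to the device
 */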

/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_POLLED,		/* caller polls for completion using bio_poll */
	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
	__REQ_SWAP,		/* swap I/O */
	__REQ_DRV,		/* for driver use */
	__REQ_FS_PRIVATE,	/* for file system (submitter) use */
	__REQ_ATOMIC,		/* for atomic write operations */
	/*
	 * Command specific flags, keep last:
	 */
	/* for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	\
			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE	(__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
#define REQ_ATOMIC	(__force blk_opf_t)(1ULL << __REQ_ATOMIC)

#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
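
/*
 * Illustrative sketch: an opf value is built by OR-ing one REQ_OP_* value
 * with any number of the flags above, e.g. a synchronous write that must
 * reach stable media:
 *
 *	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 * Here (opf & REQ_OP_MASK) == REQ_OP_WRITE, and (opf & REQ_FAILFAST_MASK)
 * is zero since no failfast behaviour was requested.
 */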

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,
	STAT_FLUSH,

	NR_STAT_GROUPS
};

static inline enum req_op bio_op(const struct bio *bio)
{
	return bio->bi_opf & REQ_OP_MASK;
}

static inline bool op_is_write(blk_opf_t op)
{
	return !!(op & (__force blk_opf_t)1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(blk_opf_t op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
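
/*
 * For example:
 *
 *	op_is_sync(REQ_OP_READ)			// true, reads are always sync
 *	op_is_sync(REQ_OP_WRITE)		// false
 *	op_is_sync(REQ_OP_WRITE | REQ_SYNC)	// true
 *	op_is_sync(REQ_OP_WRITE | REQ_FUA)	// true, and op_is_flush() is also true
 */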

static inline bool op_is_discard(blk_opf_t op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_op op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}
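
/*
 * For example:
 *
 *	op_is_zone_mgmt(REQ_OP_ZONE_OPEN)	// true
 *	op_is_zone_mgmt(REQ_OP_ZONE_RESET)	// true
 *	op_is_zone_mgmt(REQ_OP_ZONE_RESET_ALL)	// false, special-cased as noted above
 *	op_is_zone_mgmt(REQ_OP_WRITE)		// false
 */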

static inline int op_stat_group(enum req_op op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
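
/*
 * op_is_write() returns 0 or 1, which lines up with STAT_READ and
 * STAT_WRITE in enum stat_group above, so for example:
 *
 *	op_stat_group(REQ_OP_READ)	== STAT_READ
 *	op_stat_group(REQ_OP_WRITE)	== STAT_WRITE
 *	op_stat_group(REQ_OP_DISCARD)	== STAT_DISCARD
 */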

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */