/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;

/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)

/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
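
/*
 * Illustrative sketch (not part of this header): a blk-mq driver's
 * ->queue_rq() hook might distinguish the two resource errors roughly as
 * below. The helpers my_dev_get_tag() and my_dma_map() are hypothetical.
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		if (!my_dev_get_tag(hctx))
 *			// Device-specific resource: completion of in-flight
 *			// IO will free one, so a queue rerun is guaranteed.
 *			return BLK_STS_DEV_RESOURCE;
 *		if (my_dma_map(bd->rq) < 0)
 *			// System-wide resource: no such guarantee, let the
 *			// block layer schedule the rerun itself.
 *			return BLK_STS_RESOURCE;
 *		return BLK_STS_OK;
 *	}
 */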

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
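
/*
 * Usage sketch (hypothetical multipath completion handler;
 * retry_on_other_path() is a made-up helper):
 *
 *	if (blk_path_error(error))
 *		// The same IO may succeed on another path, so requeue it.
 *		retry_on_other_path(bio);
 *	else
 *		// Target-side error: failing over will not help.
 *		bio->bi_status = error;
 */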

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
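
/*
 * Worked example of the packing above: with BIO_ISSUE_RES_BITS = 1 and
 * BIO_ISSUE_SIZE_BITS = 12, the shifts come out to RES_SHIFT = 63 and
 * SIZE_SHIFT = 51, i.e.
 *
 *	bit  63     : reserved (BIO_ISSUE_THROTL_SKIP_LATENCY)
 *	bits 62..51 : original bio size, truncated to 12 bits
 *	bits 50..0  : issue time, low 51 bits of ktime_get_ns()
 *
 * so (illustrative values):
 *
 *	struct bio_issue issue = { .value = 0 };
 *	bio_issue_init(&issue, 8);	// size 8 sectors, timestamped now
 *	bio_issue_size(&issue);		// == 8
 *	bio_issue_time(&issue);		// low 51 bits of the issue time
 */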

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;

	struct bvec_iter	bi_iter;

	atomic_t		__bi_remaining;
	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes directly to a device, it will not have a blkg as it
	 * will not have a request_queue associated with it. The reference is
	 * put on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx	*bi_crypt_context;
#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
	bool			bi_skip_dm_default_key;
#endif
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
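
/*
 * Illustrative note: bio_reset() (block/bio.c) relies on this layout by
 * clearing only the head of the structure, conceptually
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 *
 * so every field from bi_max_vecs onwards (inline vecs, pool pointer,
 * reference count) survives a reset. A sketch, not the exact upstream body.
 */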

/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put bvec pages on release */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_USER_MAPPED,	/* contains user pages */
	BIO_NULL_MAPPED,	/* contains invalid user pages */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_QUEUE_ENTERED,	/* can use blk_queue_enter_live() */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};

/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
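
/*
 * Worked example: with BVEC_POOL_BITS = 3, BVEC_POOL_OFFSET = 13, so the
 * pool index lives in bits 15..13 of the 16-bit bi_flags. A bio whose
 * bvecs came from pool 2 stores 2 + 1 = 3 there:
 *
 *	bio->bi_flags |= (2 + 1) << BVEC_POOL_OFFSET;
 *	BVEC_POOL_IDX(bio);	// == 3; 0 would mean "nothing to free"
 *
 * (Illustrative only; the encoding is done by the bio allocation code in
 * block/bio.c, never open-coded like this.)
 */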

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

typedef __u32 __bitwise blk_mq_req_flags_t;

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24
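
/*
 * Example of the encoding: a synchronous write preceded by a cache flush
 * packs the op into the low 8 bits and the flags above them:
 *
 *	unsigned int opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
 *
 *	opf & REQ_OP_MASK;	// == REQ_OP_WRITE (1, odd: data TO device)
 *	op_is_write(opf);	// true, because bit 0 is set
 *
 * REQ_OP_READ (0) has bit 0 clear, so it transfers FROM the device.
 */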

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* reset all zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 8,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_NOWAIT_INLINE	(1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,

	NR_STAT_GROUPS
};

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}
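
/*
 * For example (illustrative values):
 *
 *	op_is_flush(REQ_OP_WRITE | REQ_PREFLUSH);	// true
 *	op_is_flush(REQ_OP_WRITE | REQ_FUA);		// true
 *	op_is_flush(REQ_OP_WRITE);			// false
 */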

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
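
/*
 * For example (illustrative values):
 *
 *	op_is_sync(REQ_OP_READ);		// true, reads are always sync
 *	op_is_sync(REQ_OP_WRITE);		// false
 *	op_is_sync(REQ_OP_WRITE | REQ_SYNC);	// true
 */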

static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}

/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}

static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
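
/*
 * Note on the bare return: op_is_write() yields 0 or 1, which lines up
 * with STAT_READ (0) and STAT_WRITE (1) in enum stat_group above, so reads
 * and writes land in their stat buckets without an explicit mapping, e.g.
 * op_stat_group(REQ_OP_READ) == STAT_READ and
 * op_stat_group(REQ_OP_WRITE | REQ_SYNC) == STAT_WRITE.
 */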

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_EAGAIN		-2U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
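
/*
 * Cookie layout sketch: a valid cookie carries the hardware queue number
 * above BLK_QC_T_SHIFT and the request tag in the low 16 bits, with bit 31
 * marking internal (scheduler) tags. Roughly:
 *
 *	blk_qc_t cookie = (queue_num << BLK_QC_T_SHIFT) | tag;
 *
 *	blk_qc_t_to_queue_num(cookie);	// == queue_num
 *	blk_qc_t_to_tag(cookie);	// == tag
 *
 * (Illustrative; real cookies are produced by the blk-mq core when a
 * request is issued.)
 */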

struct blk_rq_stat {
	u64 mean;
	u64 min;
	u64 max;
	u32 nr_samples;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */