/*
 * Block data types and constants.  Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);

/*
 * bv_len and bv_offset were unsigned short, but we might as well be
 * ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};
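
/*
 * Illustrative sketch (not part of this header): one bio_vec describes a
 * contiguous byte range inside a single page, e.g. 3584 bytes starting at
 * byte offset 512 of some page "pg":
 *
 *	struct bio_vec bv = {
 *		.bv_page	= pg,
 *		.bv_offset	= 512,
 *		.bv_len		= 3584,
 *	};
 */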

#ifdef CONFIG_BLOCK

struct bvec_iter {
	sector_t		bi_sector;	/* device address in 512-byte
						   sectors */
	unsigned int		bi_size;	/* residual I/O count */

	unsigned int		bi_idx;		/* current index into bvl_vec */

	unsigned int            bi_bvec_done;	/* number of bytes completed in
						   current bvec */
};
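
/*
 * Illustrative sketch, not the kernel helper: consuming "bytes" of I/O
 * walks the iterator roughly like this (the real bvec_iter_advance() in
 * <linux/bio.h> is equivalent in spirit; bi_sector is advanced alongside
 * this walk by bytes >> 9):
 *
 *	while (bytes) {
 *		unsigned int len = min(bytes,
 *			bvl[iter->bi_idx].bv_len - iter->bi_bvec_done);
 *
 *		bytes -= len;
 *		iter->bi_size -= len;
 *		iter->bi_bvec_done += len;
 *		if (iter->bi_bvec_done == bvl[iter->bi_idx].bv_len) {
 *			iter->bi_bvec_done = 0;
 *			iter->bi_idx++;
 *		}
 *	}
 */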

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned int		bi_flags;	/* status, command, etc. */
	unsigned short		bi_write_hint;
	int			bi_error;
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vecs */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
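
/*
 * Illustrative sketch (an assumption about the allocator, not its actual
 * code): because bi_inline_vecs is a trailing zero-length array, a bio and
 * a small vec table can come from a single allocation:
 *
 *	struct bio *bio = kmalloc(sizeof(struct bio) +
 *				  nr_vecs * sizeof(struct bio_vec), gfp);
 *	bio->bi_max_vecs = nr_vecs;
 *	bio->bi_io_vec = bio->bi_inline_vecs;
 */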

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

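/*
 * Illustrative sketch: given the offsetof() above, bio_reset() can clear
 * everything before bi_max_vecs in one step (the real function in
 * block/bio.c also restores the flag bits above BIO_RESET_BITS):
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 */
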
/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */

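/*
 * Illustrative only: the BIO_* values above are bit numbers, not masks,
 * so they are tested against bi_flags with a shift; bio_flagged() in
 * <linux/bio.h> wraps this pattern:
 *
 *	bool cloned  = bio->bi_flags & (1U << BIO_CLONED);
 *	bool bounced = bio->bi_flags & (1U << BIO_BOUNCED);
 */
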
/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BIO_POOL_IDX()
 */
#define BIO_RESET_BITS	13
#define BIO_OWNS_VEC	13	/* bio_free() should free bvec */

/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_NONE		((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET		(32 - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)

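/*
 * Illustrative layout sketch (assuming the 32-bit bi_flags above): the
 * pool index occupies the top BIO_POOL_BITS bits, above the flag bits:
 *
 *	 31       28 27                                0
 *	+-----------+----------------------------------+
 *	| pool idx  |         BIO_* flag bits          |
 *	+-----------+----------------------------------+
 *
 * e.g. a bio from pool 2 that is also a clone:
 *
 *	bio->bi_flags = (2UL << BIO_POOL_OFFSET) | (1U << BIO_CLONED);
 *	BIO_POOL_IDX(bio) == 2;
 */
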
#endif /* CONFIG_BLOCK */

/*
 * Request flags.  For use in the cmd_flags field of struct request, and in
 * bi_rw of struct bio.  Note that some flags are only valid in either one.
 */
enum rq_flag_bits {
	/* common flags */
	__REQ_WRITE,		/* not set: read; set: write */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */

	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata I/O request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_DISCARD,		/* request to discard sectors */
	__REQ_SECURE,		/* secure discard (used with __REQ_DISCARD) */
	__REQ_WRITE_SAME,	/* write same block many times */

	__REQ_NOIDLE,		/* don't anticipate more I/O after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_FLUSH,		/* request for cache flush */

	/* bio only flags */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_THROTTLED,	/* this bio has already been subjected to
				 * throttling rules; don't do it again */

	/* request only flags */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests and also
				   for requests for which the SCSI "quiesce"
				   state must be ignored */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_FLUSH_SEQ,	/* request for flush sequence */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_PM,		/* runtime pm request */
	__REQ_HASHED,		/* on IO scheduler merge hash */
	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
	__REQ_NO_TIMEOUT,	/* requests may never expire */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_WRITE		(1ULL << __REQ_WRITE)
#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_DISCARD		(1ULL << __REQ_DISCARD)
#define REQ_WRITE_SAME		(1ULL << __REQ_WRITE_SAME)
#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
	 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
	 REQ_SECURE | REQ_INTEGRITY)
#define REQ_CLONE_MASK		REQ_COMMON_MASK

#define BIO_NO_ADVANCE_ITER_MASK	(REQ_DISCARD|REQ_WRITE_SAME)

/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)

#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)

#define REQ_SORTED		(1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_STARTED		(1ULL << __REQ_STARTED)
#define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
#define REQ_QUEUED		(1ULL << __REQ_QUEUED)
#define REQ_ELVPRIV		(1ULL << __REQ_ELVPRIV)
#define REQ_FAILED		(1ULL << __REQ_FAILED)
#define REQ_QUIET		(1ULL << __REQ_QUIET)
#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
#define REQ_FLUSH		(1ULL << __REQ_FLUSH)
#define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
#define REQ_SECURE		(1ULL << __REQ_SECURE)
#define REQ_PM			(1ULL << __REQ_PM)
#define REQ_HASHED		(1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
#define REQ_NO_TIMEOUT		(1ULL << __REQ_NO_TIMEOUT)

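/*
 * Illustrative only (variable names are made up): each __REQ_* enumerator
 * is a bit number and each REQ_* macro is the matching mask, so callers
 * test cmd_flags or bi_rw with a bitwise AND:
 *
 *	bool is_write = bio->bi_rw & REQ_WRITE;
 *	bool no_retry = rq->cmd_flags & REQ_FAILFAST_MASK;
 */
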
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
#define BLK_QC_T_SHIFT	16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

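/*
 * Illustrative round trip (assumes tag < (1u << BLK_QC_T_SHIFT)): the
 * cookie packs the hardware queue number above the per-queue tag, so the
 * value returned at submission can later identify the queue to poll:
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(tag, qnum);
 *	blk_qc_t_to_tag(cookie) == tag;
 *	blk_qc_t_to_queue_num(cookie) == qnum;
 */
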
#endif /* __LINUX_BLK_TYPES_H */