/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/android_kabi.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_DEFAULT_RQ	128

enum rq_end_io_ret {
	RQ_END_IO_NONE,
	RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

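/*
 * Example (a minimal sketch, not part of this header): an end_io callback
 * matching the rq_end_io_fn typedef above.  Returning RQ_END_IO_FREE asks
 * the block layer to free the request; RQ_END_IO_NONE means the callback
 * keeps ownership.  The "mydrv" name is hypothetical.
 *
 *	static enum rq_end_io_ret mydrv_end_io(struct request *rq,
 *					       blk_status_t error)
 *	{
 *		complete(rq->end_io_data);
 *		return RQ_END_IO_FREE;
 *	}
 */
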
/* request flags */
typedef __u32 __bitwise req_flags_t;

/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/*
 * Look at ->special_vec for the actual data payload instead of the
 * bio chain.
 */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))
/* queue has elevator attached */
#define RQF_ELV			((__force req_flags_t)(1 << 22))
#define RQF_RESV		((__force req_flags_t)(1 << 23))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	unsigned int timeout;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	union {
		struct list_head queuelist;
		struct request *rq_next;
	};

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by the
	 * completion path.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	unsigned short nr_integrity_segments;
#endif

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	unsigned short write_hint;
	unsigned short ioprio;

	enum mq_rq_state state;
	atomic_t ref;

	unsigned long deadline;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
	};

	/*
	 * Three pointers are available for IO schedulers. If they need
	 * more private data they have to allocate it dynamically.
	 */
	struct {
		struct io_cq		*icq;
		void			*priv[2];
	} elv;

	struct {
		unsigned int		seq;
		struct list_head	list;
		rq_end_io_fn		*saved_end_io;
	} flush;

	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	ANDROID_KABI_RESERVE(1);
};

static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(req_op(rq));
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

#define rq_list_add(listptr, rq)	do {		\
	(rq)->rq_next = *(listptr);			\
	*(listptr) = rq;				\
} while (0)

#define rq_list_add_tail(lastpptr, rq)	do {		\
	(rq)->rq_next = NULL;				\
	**(lastpptr) = rq;				\
	*(lastpptr) = &rq->rq_next;			\
} while (0)

#define rq_list_pop(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))	{		\
		__req = *(listptr);			\
		*(listptr) = __req->rq_next;		\
	}						\
	__req;						\
})

#define rq_list_peek(listptr)				\
({							\
	struct request *__req = NULL;			\
	if ((listptr) && *(listptr))			\
		__req = *(listptr);			\
	__req;						\
})

#define rq_list_for_each(listptr, pos)			\
	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))

#define rq_list_for_each_safe(listptr, pos, nxt)			\
	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos);	\
		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)

#define rq_list_next(rq)	(rq)->rq_next
#define rq_list_empty(list)	((list) == (struct request *) NULL)

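/*
 * Example (a minimal sketch, not from this header): draining a singly
 * linked request list built with the rq_list helpers above.
 *
 *	static void mydrv_flush_list(struct request **rqlist)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = rq_list_pop(rqlist)))
 *			blk_mq_end_request(rq, BLK_STS_OK);
 *	}
 */
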
/**
 * rq_list_move() - move a struct request from one list to another
 * @src: The source list @rq is currently in
 * @dst: The destination list that @rq will be appended to
 * @rq: The request to move
 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
 */
static inline void rq_list_move(struct request **src, struct request **dst,
				struct request *rq, struct request *prev)
{
	if (prev)
		prev->rq_next = rq->rq_next;
	else
		*src = rq->rq_next;
	rq_list_add(dst, rq);
}

/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first for a fairer dispatch.
		 */
		struct list_head	dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many works are left in the batch
	 * before changing to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw_queue is busy using Exponential Weighted Moving
	 * Average algorithm.
	 */
	unsigned int		dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;
	/** @nr_ctx: Number of software queues. */
	unsigned short		nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue to put requests when there is no tag
	 * available at the moment, to wait for another try in the future.
	 */
	wait_queue_entry_t	dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag at this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;

	/** @queued: Number of queued requests. */
	unsigned long		queued;
	/** @run: Number of dispatched requests. */
	unsigned long		run;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;

	/** @cpuhp_online: List to store requests when a CPU is going offline. */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: List to store requests when a CPU dies. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;

	ANDROID_KABI_RESERVE(1);
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:
 *		   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 */
struct blk_mq_tag_set {
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	const struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct blk_mq_tags	*shared_tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;

	ANDROID_KABI_RESERVE(1);
};

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. The driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct request **rqlist);

	/**
	 * @get_budget: Reserve a budget before queueing a request. Once
	 * ->queue_rq() has run, it is the driver's responsibility to release
	 * the reserved budget. The failure case of ->get_budget() must also
	 * be handled to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is reserved for setting
	 * up flush requests.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing a request that has not completed,
	 * usually to free driver private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif

	ANDROID_KABI_RESERVE(1);
};
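
/*
 * Example (a minimal sketch, not from this header): a driver-side
 * blk_mq_ops table.  The "mydrv" handlers are hypothetical; this one
 * starts the request, performs the I/O synchronously and completes it.
 *
 *	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					   const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		mydrv_do_io(rq);		(hypothetical helper)
 *		blk_mq_end_request(rq, BLK_STS_OK);
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops mydrv_mq_ops = {
 *		.queue_rq = mydrv_queue_rq,
 *	};
 */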

enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires an underlying blk-mq device for
	 * completing IO.
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 5,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 6,
	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,
	BLK_MQ_S_SCHED_RESTART	= 2,

	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE	= 3,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)

#define BLK_MQ_NO_HCTX_IDX	(-1U)

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, queuedata, &__key);			\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

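/*
 * Example (a minimal sketch under assumed names): bringing up a tag set
 * and a gendisk for a hypothetical single-queue "mydrv" device.  Error
 * unwinding is abbreviated.
 *
 *	static int mydrv_probe(struct mydrv *drv)
 *	{
 *		int ret;
 *
 *		ret = blk_mq_alloc_sq_tag_set(&drv->tag_set, &mydrv_mq_ops,
 *					      64, BLK_MQ_F_SHOULD_MERGE);
 *		if (ret)
 *			return ret;
 *
 *		drv->disk = blk_mq_alloc_disk(&drv->tag_set, drv);
 *		if (IS_ERR(drv->disk)) {
 *			blk_mq_free_tag_set(&drv->tag_set);
 *			return PTR_ERR(drv->disk);
 *		}
 *		return 0;
 *	}
 */
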
void blk_mq_free_request(struct request *rq);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);

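/*
 * Example (a minimal sketch, assuming a passthrough-capable queue):
 * allocate a driver-private request, execute it synchronously and free
 * it again.  blk_execute_rq() is declared further down in this header;
 * the error mapping here is an assumption for illustration.
 *
 *	static blk_status_t mydrv_send_cmd(struct request_queue *q)
 *	{
 *		struct request *rq;
 *		blk_status_t status;
 *
 *		rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *		if (IS_ERR(rq))
 *			return BLK_STS_RESOURCE;
 *		status = blk_execute_rq(rq, false);
 *		blk_mq_free_request(rq);
 *		return status;
 *	}
 */
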
/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;

	atomic_t active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * used to clear request reference in rqs[] before freeing one
	 * request pool
	 */
	spinlock_t lock;
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}

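/*
 * Example (a minimal sketch): a unique tag packs the hardware queue index
 * in the upper 16 bits and the per-queue tag in the lower 16, so a driver
 * can recover both from a single 32-bit cookie.
 *
 *	u32 cookie = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(cookie);
 *	u16 tag = blk_mq_unique_tag_to_tag(cookie);
 */
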
/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context instead of
 * interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		   void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Start/end time stamping is only needed if iostat or blk stats are
 * enabled, or an IO scheduler is in use.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||
			(req->end_io && !blk_rq_is_passthrough(req)))
		return false;

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}

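/*
 * Example (a minimal sketch under assumed names): a driver completion
 * path that batches completions where possible and falls back to the
 * regular path otherwise.  mydrv_complete_batch() is hypothetical and
 * would call blk_mq_end_request_batch() on the accumulated io_comp_batch.
 *
 *	static void mydrv_complete_rq(struct request *rq, int error,
 *				      struct io_comp_batch *iob)
 *	{
 *		if (!blk_mq_add_to_batch(rq, iob, error,
 *					 mydrv_complete_batch))
 *			blk_mq_complete_request(rq);
 *	}
 */
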
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

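/*
 * Example (a minimal sketch): with @cmd_size set in the tag set, the
 * block layer allocates a per-request PDU right behind each request.
 * A hypothetical driver converts between the two like this:
 *
 *	struct mydrv_cmd {
 *		u32 opcode;
 *	};
 *
 *	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *rq2 = blk_mq_rq_from_pdu(cmd);	(rq2 == rq)
 */
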
#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

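/*
 * Example (a minimal sketch): walking every data segment of a request,
 * e.g. to program a hypothetical DMA engine one bio_vec at a time.
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		mydrv_map_seg(page_to_phys(bvec.bv_page) + bvec.bv_offset,
 *			      bvec.bv_len);
 */
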
/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request.  The caller needs to check
 * that there is at least one bvec before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
			       unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
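
/*
 * Example (a minimal sketch under assumed names): mapping a request into
 * a preallocated scatterlist before handing it to DMA, as a hypothetical
 * queue_rq implementation might do.
 *
 *	sg_init_table(drv->sg, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, drv->sg);
 *	if (nents)
 *		mydrv_setup_dma(drv, drv->sg, nents);
 */
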
void blk_dump_rq_flags(struct request *, char *);

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}

bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
	if (blk_req_needs_zone_write_lock(rq))
		__blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
		__blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return rq->q->disk->seq_zones_wlock &&
		test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	if (!blk_req_needs_zone_write_lock(rq))
		return true;
	return !blk_req_zone_is_write_locked(rq);
}
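
/*
 * Example (a minimal sketch): a zoned-aware dispatch path takes the
 * per-zone write lock before issuing a write to a sequential zone and
 * releases it on completion.  The issue helper and the requeue choice
 * are assumptions for illustration.
 *
 *	if (!blk_req_can_dispatch_to_zone(rq))
 *		return BLK_STS_RESOURCE;	(zone is write-locked, retry)
 *	blk_req_zone_write_lock(rq);
 *	mydrv_issue(rq);			(hypothetical helper)
 *	blk_req_zone_write_unlock(rq);
 */
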
#else /* CONFIG_BLK_DEV_ZONED */
static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
	return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
	return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */

#endif /* BLK_MQ_H */