/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
#include <linux/srcu.h>
#include <linux/rw_hint.h>
#include <linux/android_kabi.h>

struct blk_mq_tags;
struct blk_flush_queue;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_DEFAULT_RQ	128

enum rq_end_io_ret {
	RQ_END_IO_NONE,
	RQ_END_IO_FREE,
};

typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

/* request flags */
typedef __u32 __bitwise req_flags_t;

/* Keep rqf_name[] in sync with the definitions below */
enum {
	/* drive already may have started this one */
	__RQF_STARTED,
	/* request for flush sequence */
	__RQF_FLUSH_SEQ,
	/* merge of different types, fail separately */
	__RQF_MIXED_MERGE,
	/* don't call prep for this one */
	__RQF_DONTPREP,
	/* use hctx->sched_tags */
	__RQF_SCHED_TAGS,
	/* use an I/O scheduler for this request */
	__RQF_USE_SCHED,
	/* vaguely specified driver internal error.  Ignored by block layer */
	__RQF_FAILED,
	/* don't warn about errors */
	__RQF_QUIET,
	/* account into disk and partition IO statistics */
	__RQF_IO_STAT,
	/* runtime pm request */
	__RQF_PM,
	/* on IO scheduler merge hash */
	__RQF_HASHED,
	/* track IO completion time */
	__RQF_STATS,
	/*
	 * Look at ->special_vec for the actual data payload instead of the
	 * bio chain.
	 */
	__RQF_SPECIAL_PAYLOAD,
	/* request completion needs to be signaled to zone write plugging. */
	__RQF_ZONE_WRITE_PLUGGING,
	/* ->timeout has been called, don't expire again */
	__RQF_TIMED_OUT,
	__RQF_RESV,
	__RQF_BITS
};

#define RQF_STARTED		((__force req_flags_t)(1 << __RQF_STARTED))
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << __RQF_FLUSH_SEQ))
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << __RQF_MIXED_MERGE))
#define RQF_DONTPREP		((__force req_flags_t)(1 << __RQF_DONTPREP))
#define RQF_SCHED_TAGS		((__force req_flags_t)(1 << __RQF_SCHED_TAGS))
#define RQF_USE_SCHED		((__force req_flags_t)(1 << __RQF_USE_SCHED))
#define RQF_FAILED		((__force req_flags_t)(1 << __RQF_FAILED))
#define RQF_QUIET		((__force req_flags_t)(1 << __RQF_QUIET))
#define RQF_IO_STAT		((__force req_flags_t)(1 << __RQF_IO_STAT))
#define RQF_PM			((__force req_flags_t)(1 << __RQF_PM))
#define RQF_HASHED		((__force req_flags_t)(1 << __RQF_HASHED))
#define RQF_STATS		((__force req_flags_t)(1 << __RQF_STATS))
#define RQF_SPECIAL_PAYLOAD	\
			((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD))
#define RQF_ZONE_WRITE_PLUGGING	\
			((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING))
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << __RQF_TIMED_OUT))
#define RQF_RESV		((__force req_flags_t)(1 << __RQF_RESV))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	blk_opf_t cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int tag;
	int internal_tag;

	unsigned int timeout;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	union {
		struct list_head queuelist;
		struct request *rq_next;
	};

	struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* Time that the first bio started allocating this request. */
	u64 alloc_time_ns;
#endif
	/* Time that this request was allocated for this IO. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
	/*
	 * rq sectors used for blk stats. It has the same value as
	 * blk_rq_sectors(rq), except that it is never zeroed by
	 * completion.
	 */
	unsigned short stats_sectors;

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
	unsigned short nr_integrity_segments;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct bio_crypt_ctx *crypt_ctx;
	struct blk_crypto_keyslot *crypt_keyslot;
#endif

	enum rw_hint write_hint;
	unsigned short ioprio;

	enum mq_rq_state state;
	atomic_t ref;

	unsigned long deadline;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct llist_node ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. special_vec must
	 * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot
	 * be inserted into an IO scheduler.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	struct {
		struct io_cq		*icq;
		void			*priv[2];
	} elv;

	struct {
		unsigned int		seq;
		rq_end_io_fn		*saved_end_io;
	} flush;

	u64 fifo_time;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	ANDROID_KABI_RESERVE(1);
	ANDROID_OEM_DATA(1);
};

static inline enum req_op req_op(const struct request *req)
{
	return req->cmd_flags & REQ_OP_MASK;
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_op_is_passthrough(rq->cmd_flags);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

static inline int rq_list_empty(const struct rq_list *rl)
{
	return rl->head == NULL;
}

static inline void rq_list_init(struct rq_list *rl)
{
	rl->head = NULL;
	rl->tail = NULL;
}

static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
{
	rq->rq_next = NULL;
	if (rl->tail)
		rl->tail->rq_next = rq;
	else
		rl->head = rq;
	rl->tail = rq;
}

static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
{
	rq->rq_next = rl->head;
	rl->head = rq;
	if (!rl->tail)
		rl->tail = rq;
}

static inline struct request *rq_list_pop(struct rq_list *rl)
{
	struct request *rq = rl->head;

	if (rq) {
		rl->head = rl->head->rq_next;
		if (!rl->head)
			rl->tail = NULL;
		rq->rq_next = NULL;
	}

	return rq;
}

static inline struct request *rq_list_peek(struct rq_list *rl)
{
	return rl->head;
}

#define rq_list_for_each(rl, pos)					\
	for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next)

#define rq_list_for_each_safe(rl, pos, nxt)				\
	for (pos = rq_list_peek((rl)), nxt = pos->rq_next;		\
		pos; pos = nxt, nxt = pos ? pos->rq_next : NULL)
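
/*
 * Illustrative sketch (not part of this header's API): move all requests
 * from one rq_list to the tail of another using the helpers above,
 * assuming the caller owns both lists and no locking is required.
 */
static inline void example_rq_list_splice_tail(struct rq_list *src,
					       struct rq_list *dst)
{
	struct request *rq;

	while ((rq = rq_list_pop(src)))
		rq_list_add_tail(dst, rq);
}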

/**
 * enum blk_eh_timer_return - How the timeout handler should proceed
 * @BLK_EH_DONE: The block driver completed the command or will complete it at
 *	a later time.
 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
 *	request to complete.
 */
enum blk_eh_timer_return {
	BLK_EH_DONE,
	BLK_EH_RESET_TIMER,
};

/* Keep alloc_policy_name[] in sync with the definitions below */
enum {
	BLK_TAG_ALLOC_FIFO,	/* allocate starting from 0 */
	BLK_TAG_ALLOC_RR,	/* allocate starting from last allocated tag */
	BLK_TAG_ALLOC_MAX
};

/**
 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
 * block device
 */
struct blk_mq_hw_ctx {
	struct {
		/** @lock: Protects the dispatch list. */
		spinlock_t		lock;
		/**
		 * @dispatch: Used for requests that are ready to be
		 * dispatched to the hardware but for some reason (e.g. lack of
		 * resources) could not be sent to the hardware. As soon as the
		 * driver can send new requests, requests on this list will
		 * be sent first, for fairer dispatch.
		 */
		struct list_head	dispatch;
		/**
		 * @state: BLK_MQ_S_* flags. Defines the state of the hw
		 * queue (active, scheduled to restart, stopped).
		 */
		unsigned long		state;
	} ____cacheline_aligned_in_smp;

	/**
	 * @run_work: Used for scheduling a hardware queue run at a later time.
	 */
	struct delayed_work	run_work;
	/** @cpumask: Map of available CPUs where this hctx can run. */
	cpumask_var_t		cpumask;
	/**
	 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
	 * selection from @cpumask.
	 */
	int			next_cpu;
	/**
	 * @next_cpu_batch: Counter of how many work items are left in the
	 * batch before switching to the next CPU.
	 */
	int			next_cpu_batch;

	/** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
	unsigned long		flags;

	/**
	 * @sched_data: Pointer owned by the IO scheduler attached to a request
	 * queue. It's up to the IO scheduler how to use this pointer.
	 */
	void			*sched_data;
	/**
	 * @queue: Pointer to the request queue that owns this hardware context.
	 */
	struct request_queue	*queue;
	/** @fq: Queue of requests that need to perform a flush operation. */
	struct blk_flush_queue	*fq;

	/**
	 * @driver_data: Pointer to data owned by the block driver that created
	 * this hctx
	 */
	void			*driver_data;

	/**
	 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
	 * pending request in that software queue.
	 */
	struct sbitmap		ctx_map;

	/**
	 * @dispatch_from: Software queue to be used when no scheduler was
	 * selected.
	 */
	struct blk_mq_ctx	*dispatch_from;
	/**
	 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
	 * decide if the hw queue is busy, using an Exponentially Weighted
	 * Moving Average algorithm.
	 */
	unsigned int		dispatch_busy;

	/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
	unsigned short		type;
	/** @nr_ctx: Number of software queues. */
	unsigned short		nr_ctx;
	/** @ctxs: Array of software queues. */
	struct blk_mq_ctx	**ctxs;

	/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
	spinlock_t		dispatch_wait_lock;
	/**
	 * @dispatch_wait: Waitqueue entry on which requests wait when there is
	 * no tag available at the moment, so that they can be retried later.
	 */
	wait_queue_entry_t	dispatch_wait;

	/**
	 * @wait_index: Index of next available dispatch_wait queue to insert
	 * requests.
	 */
	atomic_t		wait_index;

	/**
	 * @tags: Tags owned by the block driver. A tag in this set is only
	 * assigned when a request is dispatched from a hardware queue.
	 */
	struct blk_mq_tags	*tags;
	/**
	 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
	 * scheduler associated with a request queue, a tag is assigned when
	 * that request is allocated. Else, this member is not used.
	 */
	struct blk_mq_tags	*sched_tags;

	/** @numa_node: NUMA node the storage adapter has been connected to. */
	unsigned int		numa_node;
	/** @queue_num: Index of this hardware queue. */
	unsigned int		queue_num;

	/**
	 * @nr_active: Number of active requests. Only used when a tag set is
	 * shared across request queues.
	 */
	atomic_t		nr_active;

	/** @cpuhp_online: List to store requests if a CPU is going to die. */
	struct hlist_node	cpuhp_online;
	/** @cpuhp_dead: List to store requests if some CPU dies. */
	struct hlist_node	cpuhp_dead;
	/** @kobj: Kernel object for sysfs. */
	struct kobject		kobj;

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @debugfs_dir: debugfs directory for this hardware queue. Named
	 * as cpu<cpu_number>.
	 */
	struct dentry		*debugfs_dir;
	/** @sched_debugfs_dir: debugfs directory for the scheduler. */
	struct dentry		*sched_debugfs_dir;
#endif

	/**
	 * @hctx_list: if this hctx is not in use, this is an entry in
	 * q->unused_hctx_list.
	 */
	struct list_head	hctx_list;

	ANDROID_KABI_RESERVE(1);
};

/**
 * struct blk_mq_queue_map - Map software queues to hardware queues
 * @mq_map:       CPU ID to hardware queue index map. This is an array
 *	with nr_cpu_ids elements. Each element has a value in the range
 *	[@queue_offset, @queue_offset + @nr_queues).
 * @nr_queues:    Number of hardware queues to map CPU IDs onto.
 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
 *	driver to map each hardware queue type (enum hctx_type) onto a distinct
 *	set of hardware queues.
 */
struct blk_mq_queue_map {
	unsigned int *mq_map;
	unsigned int nr_queues;
	unsigned int queue_offset;
};

/**
 * enum hctx_type - Type of hardware queue
 * @HCTX_TYPE_DEFAULT:	All I/O not otherwise accounted for.
 * @HCTX_TYPE_READ:	Just for READ I/O.
 * @HCTX_TYPE_POLL:	Polled I/O of any kind.
 * @HCTX_MAX_TYPES:	Number of types of hctx.
 */
enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,

	HCTX_MAX_TYPES,
};

/**
 * struct blk_mq_tag_set - tag set that can be shared between request queues
 * @ops:	   Pointers to functions that implement block driver behavior.
 * @map:	   One or more ctx -> hctx mappings. One map exists for each
 *		   hardware queue type (enum hctx_type) that the driver wishes
 *		   to support. There are no restrictions on maps being of the
 *		   same size, and it's perfectly legal to share maps between
 *		   types.
 * @nr_maps:	   Number of elements in the @map array. A number in the range
 *		   [1, HCTX_MAX_TYPES].
 * @nr_hw_queues:  Number of hardware queues supported by the block driver that
 *		   owns this data structure.
 * @queue_depth:   Number of tags per hardware queue, reserved tags included.
 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
 *		   allocations.
 * @cmd_size:	   Number of additional bytes to allocate per request. The block
 *		   driver owns these additional bytes.
 * @numa_node:	   NUMA node the storage adapter has been connected to.
 * @timeout:	   Request processing timeout in jiffies.
 * @flags:	   Zero or more BLK_MQ_F_* flags.
 * @driver_data:   Pointer to data owned by the block driver that created this
 *		   tag set.
 * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
 *		   elements.
 * @shared_tags:
 *		   Shared set of tags. Has @nr_hw_queues elements. If set,
 *		   shared by all @tags.
 * @tag_list_lock: Serializes tag_list accesses.
 * @tag_list:	   List of the request queues that use this tag set. See also
 *		   request_queue.tag_set_list.
 * @srcu:	   Use as lock when type of the request queue is blocking
 *		   (BLK_MQ_F_BLOCKING).
 */
struct blk_mq_tag_set {
	const struct blk_mq_ops	*ops;
	struct blk_mq_queue_map	map[HCTX_MAX_TYPES];
	unsigned int		nr_maps;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct blk_mq_tags	*shared_tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
	struct srcu_struct	*srcu;

	ANDROID_KABI_RESERVE(1);
};

/**
 * struct blk_mq_queue_data - Data about a request inserted in a queue
 *
 * @rq:   Request pointer.
 * @last: If it is the last request in the queue.
 */
struct blk_mq_queue_data {
	struct request *rq;
	bool last;
};

typedef bool (busy_tag_iter_fn)(struct request *, void *);

/**
 * struct blk_mq_ops - Callback functions that implement block driver
 * behaviour.
 */
struct blk_mq_ops {
	/**
	 * @queue_rq: Queue a new request from block IO.
	 */
	blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
				 const struct blk_mq_queue_data *);

	/**
	 * @commit_rqs: If a driver uses bd->last to judge when to submit
	 * requests to hardware, it must define this function. In case of errors
	 * that make us stop issuing further requests, this hook serves the
	 * purpose of kicking the hardware (which the last request otherwise
	 * would have done).
	 */
	void (*commit_rqs)(struct blk_mq_hw_ctx *);

	/**
	 * @queue_rqs: Queue a list of new requests. The driver is guaranteed
	 * that each request belongs to the same queue. If the driver doesn't
	 * empty the @rqlist completely, then the rest will be queued
	 * individually by the block layer upon return.
	 */
	void (*queue_rqs)(struct rq_list *rqlist);

	/**
	 * @get_budget: Reserve a budget before queueing a request. Once
	 * ->queue_rq() has run, it is the driver's responsibility to release
	 * the reserved budget. The failure case of ->get_budget() must also
	 * be handled to avoid I/O deadlock.
	 */
	int (*get_budget)(struct request_queue *);

	/**
	 * @put_budget: Release the reserved budget.
	 */
	void (*put_budget)(struct request_queue *, int);

	/**
	 * @set_rq_budget_token: store rq's budget token
	 */
	void (*set_rq_budget_token)(struct request *, int);
	/**
	 * @get_rq_budget_token: retrieve rq's budget token
	 */
	int (*get_rq_budget_token)(struct request *);

	/**
	 * @timeout: Called on request timeout.
	 */
	enum blk_eh_timer_return (*timeout)(struct request *);

	/**
	 * @poll: Called to poll for completion of a specific tag.
	 */
	int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);

	/**
	 * @complete: Mark the request as complete.
	 */
	void (*complete)(struct request *);

	/**
	 * @init_hctx: Called when the block layer side of a hardware queue has
	 * been set up, allowing the driver to allocate/init matching
	 * structures.
	 */
	int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
	/**
	 * @exit_hctx: Ditto for exit/teardown.
	 */
	void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);

	/**
	 * @init_request: Called for every command allocated by the block layer
	 * to allow the driver to set up driver specific data.
	 *
	 * Tags greater than or equal to queue_depth are used for setting up
	 * flush requests.
	 */
	int (*init_request)(struct blk_mq_tag_set *set, struct request *,
			    unsigned int, unsigned int);
	/**
	 * @exit_request: Ditto for exit/teardown.
	 */
	void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
			     unsigned int);

	/**
	 * @cleanup_rq: Called before freeing a request that has not completed
	 * yet; usually used for freeing the driver-private data.
	 */
	void (*cleanup_rq)(struct request *);

	/**
	 * @busy: If set, returns whether or not this queue currently is busy.
	 */
	bool (*busy)(struct request_queue *);

	/**
	 * @map_queues: This allows drivers to specify their own queue mapping
	 * by overriding the setup-time function that builds the mq_map.
	 */
	void (*map_queues)(struct blk_mq_tag_set *set);

#ifdef CONFIG_BLK_DEBUG_FS
	/**
	 * @show_rq: Used by the debugfs implementation to show driver-specific
	 * information about a request.
	 */
	void (*show_rq)(struct seq_file *m, struct request *rq);
#endif

	ANDROID_KABI_RESERVE(1);
};

/* Keep hctx_flag_name[] in sync with the definitions below */
enum {
	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
	/*
	 * Set when this device requires underlying blk-mq device for
	 * completing IO.
	 */
	BLK_MQ_F_STACKING	= 1 << 2,
	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
	BLK_MQ_F_BLOCKING	= 1 << 4,
	/* Do not allow an I/O scheduler to be configured. */
	BLK_MQ_F_NO_SCHED	= 1 << 5,

	/*
	 * Select 'none' during queue registration in case of a single hwq
	 * or shared hwqs instead of 'mq-deadline'.
	 */
	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 6,
	BLK_MQ_F_ALLOC_POLICY_START_BIT = 7,
	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
};
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
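
/*
 * Illustrative sketch (not part of this header's API): a driver that
 * wants round-robin tag allocation encodes the policy into its tag set
 * flags with the macro above; the block layer recovers it later via
 * BLK_MQ_FLAG_TO_ALLOC_POLICY().
 */
static inline unsigned int example_rr_tag_set_flags(void)
{
	unsigned int flags = BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);

	/* BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) now yields BLK_TAG_ALLOC_RR */
	return flags;
}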

#define BLK_MQ_MAX_DEPTH	(10240)
#define BLK_MQ_NO_HCTX_IDX	(-1U)

enum {
	/* Keep hctx_state_name[] in sync with the definitions below */
	BLK_MQ_S_STOPPED,
	BLK_MQ_S_TAG_ACTIVE,
	BLK_MQ_S_SCHED_RESTART,
	/* hw queue is inactive after all its CPUs become offline */
	BLK_MQ_S_INACTIVE,
	BLK_MQ_S_MAX
};

struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata,
		struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, lim, queuedata)				\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_mq_alloc_disk(set, lim, queuedata, &__key);		\
})
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
		struct lock_class_key *lkclass);
struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
		struct queue_limits *lim, void *queuedata);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
		struct request_queue *q);
void blk_mq_destroy_queue(struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int queue_depth,
		unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
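
/*
 * Illustrative sketch of the usual probe-time sequence: set up a
 * single-queue tag set, then allocate a disk on top of it. Error
 * unwinding and queue-limits setup are elided; "example_*" names are
 * hypothetical, not part of this header's API.
 */
static inline struct gendisk *
example_alloc_disk(struct blk_mq_tag_set *set, const struct blk_mq_ops *ops,
		   struct queue_limits *lim, void *driver_data)
{
	int ret;

	ret = blk_mq_alloc_sq_tag_set(set, ops, BLKDEV_DEFAULT_RQ, 0);
	if (ret)
		return ERR_PTR(ret);
	/* on failure the caller must still free the tag set (elided here) */
	return blk_mq_alloc_disk(set, lim, driver_data);
}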

void blk_mq_free_request(struct request *rq);
int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
		unsigned int poll_flags);

bool blk_mq_queue_inflight(struct request_queue *q);

enum {
	/* return when out of requests */
	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
	/* allocate from reserved pool */
	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
	/* set RQF_PM */
	BLK_MQ_REQ_PM		= (__force blk_mq_req_flags_t)(1 << 2),
};

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		blk_opf_t opf, blk_mq_req_flags_t flags,
		unsigned int hctx_idx);

/*
 * Tag address space map.
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int active_queues;

	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
	struct list_head page_list;

	/*
	 * used to clear request references in rqs[] before freeing one
	 * request pool
	 */
	spinlock_t lock;
	ANDROID_OEM_DATA(1);
};

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}

enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
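
/*
 * Illustrative sketch: a driver (e.g. a SCSI LLD) that stored the 32-bit
 * cookie from blk_mq_unique_tag() can split it back into the hardware
 * queue index and the per-queue tag. Hypothetical helper.
 */
static inline void example_decode_unique_tag(u32 unique_tag,
					     u16 *hwq, u16 *tag)
{
	*hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	*tag = blk_mq_unique_tag_to_tag(unique_tag);
}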

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

static inline int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}

/*
 * Set the state to complete when completing a request from inside ->queue_rq.
 * This is used by drivers that want to ensure special complete actions that
 * need access to the request are called on failure, e.g. by nvme for
 * multipathing.
 */
static inline void blk_mq_set_request_complete(struct request *rq)
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}

/*
 * Complete the request directly instead of deferring it to softirq or
 * completing it on another CPU. Useful in preemptible context instead of
 * interrupt context.
 */
static inline void blk_mq_complete_request_direct(struct request *rq,
		   void (*complete)(struct request *rq))
{
	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
	complete(rq);
}

void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);
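
/*
 * Illustrative sketch: the smallest useful ->queue_rq() implementation,
 * as a null/loopback-style driver might write it; it starts the request
 * and immediately completes it with success. Hypothetical, not part of
 * this header's API.
 */
static inline blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
					    const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}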

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or are using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	/*
	 * passthrough I/O doesn't use iostat accounting, cgroup stats
	 * or I/O scheduler functionality.
	 */
	if (blk_rq_is_passthrough(rq))
		return false;
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}

static inline bool blk_mq_is_reserved_rq(struct request *rq)
{
	return rq->rq_flags & RQF_RESV;
}

/**
 * blk_mq_add_to_batch() - add a request to the completion batch
 * @req: The request to add to the batch
 * @iob: The batch to add the request to
 * @is_error: Specify true if the request failed with an error
 * @complete: The completion handler for the request
 *
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 *
 * Return: true when the request was added to the batch, otherwise false
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, bool is_error,
				       void (*complete)(struct io_comp_batch *))
{
	/*
	 * Check various conditions that exclude batch processing:
	 * 1) No batch container
	 * 2) Has scheduler data attached
	 * 3) Not a passthrough request and end_io set
	 * 4) Not a passthrough request and failed with an error
	 */
	if (!iob)
		return false;
	if (req->rq_flags & RQF_SCHED_TAGS)
		return false;
	if (!blk_rq_is_passthrough(req)) {
		if (req->end_io)
			return false;
		if (is_error)
			return false;
	}

	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add_tail(&iob->req_list, req);
	return true;
}
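
/*
 * Illustrative sketch: a completion path tries the batch first and only
 * falls back to an individual completion when batching is not possible.
 * "batch_fn" is a hypothetical io_comp_batch handler supplied by the
 * driver.
 */
static inline void example_complete_rq(struct request *rq,
				       struct io_comp_batch *iob,
				       bool is_error,
				       void (*batch_fn)(struct io_comp_batch *))
{
	if (!blk_mq_add_to_batch(rq, iob, is_error, batch_fn))
		blk_mq_end_request(rq, is_error ? BLK_STS_IOERR : BLK_STS_OK);
}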

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout);
void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
void blk_freeze_queue_start_non_owner(struct request_queue *q);

void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

void blk_mq_quiesce_queue_nowait(struct request_queue *q);

unsigned int blk_mq_rq_cpu(struct request *rq);

bool __blk_should_fake_timeout(struct request_queue *q);
static inline bool blk_should_fake_timeout(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
	    test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
		return __blk_should_fake_timeout(q);
	return false;
}

/**
 * blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
 *
 * Return: request
 *
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

/**
 * blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
 *
 * Return: pointer to the PDU
 *
 * Driver command data is immediately after the request. So add request to get
 * the PDU.
 */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return rq + 1;
}
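
/*
 * Illustrative sketch: with cmd_size = sizeof(struct example_cmd) in the
 * tag set, per-command driver data lives directly behind each request,
 * and the two helpers above convert between the two views.
 * "struct example_cmd" is hypothetical.
 */
struct example_cmd {
	int status;
};

static inline struct example_cmd *example_rq_to_cmd(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static inline struct request *example_cmd_to_rq(struct example_cmd *cmd)
{
	return blk_mq_rq_from_pdu(cmd);
}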

#define queue_for_each_hw_ctx(q, hctx, i)				\
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
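
/*
 * Illustrative sketch: walk every hardware queue of a request queue,
 * e.g. to sum up the dispatch-busy estimate for debugging. Note that
 * queue_for_each_hw_ctx() iterates an xarray, so the index must be an
 * unsigned long. Hypothetical helper.
 */
static inline unsigned int example_total_dispatch_busy(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	unsigned int busy = 0;

	queue_for_each_hw_ctx(q, hctx, i)
		busy += hctx->dispatch_busy;
	return busy;
}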

static inline void blk_mq_cleanup_rq(struct request *rq)
{
	if (rq->q->mq_ops->cleanup_rq)
		rq->q->mq_ops->cleanup_rq(rq);
}

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);
}

void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
		struct lock_class_key *key);

static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

void blk_rq_init(struct request_queue *q, struct request *rq);
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		struct bio_set *bs, gfp_t gfp_mask,
		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int blk_rq_map_user(struct request_queue *, struct request *,
		struct rq_map_data *, void __user *, unsigned long, gfp_t);
int blk_rq_map_user_io(struct request *, struct rq_map_data *,
		void __user *, unsigned long, gfp_t, bool, int, bool, int);
int blk_rq_map_user_iov(struct request_queue *, struct request *,
		struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
		unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
bool blk_rq_is_poll(struct request *rq);
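
/*
 * Illustrative sketch of the synchronous passthrough pattern: allocate
 * a request, execute it, free it. Mapping a data payload with
 * blk_rq_map_kern() or blk_rq_map_user() is elided; hypothetical helper,
 * not part of this header's API.
 */
static inline blk_status_t example_execute_simple(struct request_queue *q,
						  blk_opf_t opf)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_mq_alloc_request(q, opf, 0);
	if (IS_ERR(rq))
		return BLK_STS_RESOURCE;
	status = blk_execute_rq(rq, false);
	blk_mq_free_request(rq);
	return status;
}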

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_for_each_bvec(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_bvec(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))
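
/*
 * Illustrative sketch: count the data bytes in a request by walking its
 * segments with rq_for_each_segment(); for a data-carrying request this
 * matches blk_rq_bytes() below. Hypothetical helper.
 */
static inline unsigned int example_count_segment_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;
	return bytes;
}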

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 * blk_rq_stats_sectors()	: sectors of the entire request used for stats
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	if (!rq->bio)
		return 0;
	if (!bio_has_data(rq->bio))	/* dataless requests such as discard */
		return rq->bio->bi_iter.bi_size;
	return bio_iovec(rq->bio).bv_len;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
{
	return rq->stats_sectors;
}

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

/*
 * Return the first full biovec in the request.  The caller needs to check
 * that there is at least one bvec before calling this helper.
 */
static inline struct bio_vec req_bvec(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec;
	return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

void blk_steal_bios(struct bio_list *list, struct request *rq);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 */
bool blk_update_request(struct request *rq, blk_status_t error,
			       unsigned int nr_bytes);
void blk_abort_request(struct request *);

/*
 * Number of physical segments as sent to the device.
 *
 * Normally this is the number of discontiguous data segments sent by the
 * submitter.  But for data-less commands like discard we might have no
 * actual data segments submitted, but the driver might have to add its
 * own special payload.  In that case we still return 1 here so that this
 * special payload will be mapped.
 */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}

/*
 * Number of discard segments (or ranges) the driver needs to fill in.
 * Each discard bio merged into a request is counted as one segment.
 */
static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
{
	return max_t(unsigned short, rq->nr_phys_segments, 1);
}

int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist)
{
	struct scatterlist *last_sg = NULL;

	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
}
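
/*
 * Illustrative sketch: a driver maps a request onto its scatterlist
 * before setting up DMA, sizing the table with
 * blk_rq_nr_phys_segments(). Hypothetical helper.
 */
static inline int example_map_rq_to_sg(struct request *rq,
				       struct scatterlist *sg)
{
	sg_init_table(sg, blk_rq_nr_phys_segments(rq));
	return blk_rq_map_sg(rq->q, rq, sg);
}
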
void blk_dump_rq_flags(struct request *, char *);

#endif /* BLK_MQ_H */