1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef BLK_MQ_H
3 #define BLK_MQ_H
4
5 #include <linux/blkdev.h>
6 #include <linux/sbitmap.h>
7 #include <linux/lockdep.h>
8 #include <linux/scatterlist.h>
9 #include <linux/prefetch.h>
10 #include <linux/srcu.h>
11 #include <linux/rw_hint.h>
12 #include <linux/android_kabi.h>
13
14 struct blk_mq_tags;
15 struct blk_flush_queue;
16
17 #define BLKDEV_MIN_RQ 4
18 #define BLKDEV_DEFAULT_RQ 128
19
20 enum rq_end_io_ret {
21 RQ_END_IO_NONE,
22 RQ_END_IO_FREE,
23 };
24
25 typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
26
27 /*
28 * request flags */
29 typedef __u32 __bitwise req_flags_t;
30
31 /* drive already may have started this one */
32 #define RQF_STARTED ((__force req_flags_t)(1 << 1))
33 /* request for flush sequence */
34 #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
35 /* merge of different types, fail separately */
36 #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
37 /* track inflight for MQ */
38 #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
39 /* don't call prep for this one */
40 #define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
41 /* use hctx->sched_tags */
42 #define RQF_SCHED_TAGS ((__force req_flags_t)(1 << 8))
43 /* use an I/O scheduler for this request */
44 #define RQF_USE_SCHED ((__force req_flags_t)(1 << 9))
45 /* vaguely specified driver internal error. Ignored by the block layer */
46 #define RQF_FAILED ((__force req_flags_t)(1 << 10))
47 /* don't warn about errors */
48 #define RQF_QUIET ((__force req_flags_t)(1 << 11))
49 /* account into disk and partition IO statistics */
50 #define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
51 /* runtime pm request */
52 #define RQF_PM ((__force req_flags_t)(1 << 15))
53 /* on IO scheduler merge hash */
54 #define RQF_HASHED ((__force req_flags_t)(1 << 16))
55 /* track IO completion time */
56 #define RQF_STATS ((__force req_flags_t)(1 << 17))
57 /* Look at ->special_vec for the actual data payload instead of the
58 bio chain. */
59 #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
60 /* The per-zone write lock is held for this request */
61 #define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
62 /* ->timeout has been called, don't expire again */
63 #define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
64 #define RQF_RESV ((__force req_flags_t)(1 << 23))
65
66 /* flags that prevent us from merging requests: */
67 #define RQF_NOMERGE_FLAGS \
68 (RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
69
70 enum mq_rq_state {
71 MQ_RQ_IDLE = 0,
72 MQ_RQ_IN_FLIGHT = 1,
73 MQ_RQ_COMPLETE = 2,
74 };
75
76 /*
77 * Try to put the fields that are referenced together in the same cacheline.
78 *
79 * If you modify this structure, make sure to update blk_rq_init() and
80 * especially blk_mq_rq_ctx_init() to take care of the added fields.
81 */
82 struct request {
83 struct request_queue *q;
84 struct blk_mq_ctx *mq_ctx;
85 struct blk_mq_hw_ctx *mq_hctx;
86
87 blk_opf_t cmd_flags; /* op and common flags */
88 req_flags_t rq_flags;
89
90 int tag;
91 int internal_tag;
92
93 unsigned int timeout;
94
95 /* the following two fields are internal, NEVER access directly */
96 unsigned int __data_len; /* total data len */
97 sector_t __sector; /* sector cursor */
98
99 struct bio *bio;
100 struct bio *biotail;
101
102 union {
103 struct list_head queuelist;
104 struct request *rq_next;
105 };
106
107 struct block_device *part;
108 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
109 /* Time that the first bio started allocating this request. */
110 u64 alloc_time_ns;
111 #endif
112 /* Time that this request was allocated for this IO. */
113 u64 start_time_ns;
114 /* Time that I/O was submitted to the device. */
115 u64 io_start_time_ns;
116
117 #ifdef CONFIG_BLK_WBT
118 unsigned short wbt_flags;
119 #endif
120 /*
121 * rq sectors used for blk stats. It has the same value as
122 * blk_rq_sectors(rq), except that it is never zeroed by
123 * completion.
124 */
125 unsigned short stats_sectors;
126
127 /*
128 * Number of scatter-gather DMA addr+len pairs after
129 * physical address coalescing is performed.
130 */
131 unsigned short nr_phys_segments;
132
133 #ifdef CONFIG_BLK_DEV_INTEGRITY
134 unsigned short nr_integrity_segments;
135 #endif
136
137 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
138 struct bio_crypt_ctx *crypt_ctx;
139 struct blk_crypto_keyslot *crypt_keyslot;
140 #endif
141
142 enum rw_hint write_hint;
143 unsigned short ioprio;
144
145 enum mq_rq_state state;
146 atomic_t ref;
147
148 unsigned long deadline;
149
150 /*
151 * The hash is used inside the scheduler, and killed once the
152 * request reaches the dispatch list. The ipi_list is only used
153 * to queue the request for softirq completion, which is long
154 * after the request has been unhashed (and even removed from
155 * the dispatch list).
156 */
157 union {
158 struct hlist_node hash; /* merge hash */
159 struct llist_node ipi_list;
160 };
161
162 /*
163 * The rb_node is only used inside the io scheduler, requests
164 * are pruned when moved to the dispatch queue. special_vec must
165 * only be used if RQF_SPECIAL_PAYLOAD is set, and such requests cannot
166 * be inserted into an IO scheduler.
167 */
168 union {
169 struct rb_node rb_node; /* sort/lookup */
170 struct bio_vec special_vec;
171 };
172
173 /*
174 * Three pointers are available for the IO schedulers, if they need
175 * more they have to dynamically allocate it.
176 */
177 struct {
178 struct io_cq *icq;
179 void *priv[2];
180 } elv;
181
182 struct {
183 unsigned int seq;
184 rq_end_io_fn *saved_end_io;
185 } flush;
186
187 u64 fifo_time;
188
189 /*
190 * completion callback.
191 */
192 rq_end_io_fn *end_io;
193 void *end_io_data;
194
195 ANDROID_OEM_DATA(1);
196
197 ANDROID_KABI_RESERVE(1);
198 };
199
200 static inline enum req_op req_op(const struct request *req)
201 {
202 return req->cmd_flags & REQ_OP_MASK;
203 }
204
205 static inline bool blk_rq_is_passthrough(struct request *rq)
206 {
207 return blk_op_is_passthrough(rq->cmd_flags);
208 }
209
210 static inline unsigned short req_get_ioprio(struct request *req)
211 {
212 return req->ioprio;
213 }
214
215 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
216
217 #define rq_dma_dir(rq) \
218 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
219
220 #define rq_list_add(listptr, rq) do { \
221 (rq)->rq_next = *(listptr); \
222 *(listptr) = rq; \
223 } while (0)
224
225 #define rq_list_add_tail(lastpptr, rq) do { \
226 (rq)->rq_next = NULL; \
227 **(lastpptr) = rq; \
228 *(lastpptr) = &rq->rq_next; \
229 } while (0)
230
231 #define rq_list_pop(listptr) \
232 ({ \
233 struct request *__req = NULL; \
234 if ((listptr) && *(listptr)) { \
235 __req = *(listptr); \
236 *(listptr) = __req->rq_next; \
237 } \
238 __req; \
239 })
240
241 #define rq_list_peek(listptr) \
242 ({ \
243 struct request *__req = NULL; \
244 if ((listptr) && *(listptr)) \
245 __req = *(listptr); \
246 __req; \
247 })
248
249 #define rq_list_for_each(listptr, pos) \
250 for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
251
252 #define rq_list_for_each_safe(listptr, pos, nxt) \
253 for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \
254 pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
255
256 #define rq_list_next(rq) (rq)->rq_next
257 #define rq_list_empty(list) ((list) == (struct request *) NULL)
258
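/*
 * Illustrative sketch (not part of the kernel-doc above): the rq_list
 * helpers manage a singly linked list threaded through rq->rq_next, as
 * used for batched plug submission and completion. A hypothetical
 * driver-side loop could look like this, where my_submit_one() is an
 * assumed driver helper, not a block layer API:
 *
 *	struct request *rqlist = NULL;
 *	struct request *rq;
 *
 *	rq_list_add(&rqlist, rq_a);		// push onto the head
 *	rq_list_add(&rqlist, rq_b);
 *
 *	while ((rq = rq_list_pop(&rqlist)))	// drain in LIFO order
 *		my_submit_one(rq);
 */
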
259 /**
260 * rq_list_move() - move a struct request from one list to another
261 * @src: The source list @rq is currently in
262 * @dst: The destination list that @rq will be appended to
263 * @rq: The request to move
264 * @prev: The request preceding @rq in @src (NULL if @rq is the head)
265 */
266 static inline void rq_list_move(struct request **src, struct request **dst,
267 struct request *rq, struct request *prev)
268 {
269 if (prev)
270 prev->rq_next = rq->rq_next;
271 else
272 *src = rq->rq_next;
273 rq_list_add(dst, rq);
274 }
275
276 /**
277 * enum blk_eh_timer_return - How the timeout handler should proceed
278 * @BLK_EH_DONE: The block driver completed the command or will complete it at
279 * a later time.
280 * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
281 * request to complete.
282 */
283 enum blk_eh_timer_return {
284 BLK_EH_DONE,
285 BLK_EH_RESET_TIMER,
286 };
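
/*
 * Illustrative sketch only: a driver's ->timeout handler typically either
 * aborts the command and completes the request (returning BLK_EH_DONE), or
 * asks for more time with BLK_EH_RESET_TIMER. my_abort_cmd() is an assumed
 * driver helper, not a block layer API:
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq)
 *	{
 *		if (!my_abort_cmd(blk_mq_rq_to_pdu(rq)))
 *			return BLK_EH_RESET_TIMER;	// give it more time
 *
 *		blk_mq_complete_request(rq);		// command was aborted
 *		return BLK_EH_DONE;
 *	}
 */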
287
288 #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
289 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
290
291 /**
292 * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
293 * block device
294 */
295 struct blk_mq_hw_ctx {
296 struct {
297 /** @lock: Protects the dispatch list. */
298 spinlock_t lock;
299 /**
300 * @dispatch: Used for requests that are ready to be
301 * dispatched to the hardware but for some reason (e.g. lack of
302 * resources) could not be sent to the hardware. As soon as the
303 * driver can send new requests, requests on this list will
304 * be sent first for a fairer dispatch.
305 */
306 struct list_head dispatch;
307 /**
308 * @state: BLK_MQ_S_* flags. Defines the state of the hw
309 * queue (active, scheduled to restart, stopped).
310 */
311 unsigned long state;
312 } ____cacheline_aligned_in_smp;
313
314 /**
315 * @run_work: Used for scheduling a hardware queue run at a later time.
316 */
317 struct delayed_work run_work;
318 /** @cpumask: Map of available CPUs where this hctx can run. */
319 cpumask_var_t cpumask;
320 /**
321 * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
322 * selection from @cpumask.
323 */
324 int next_cpu;
325 /**
326 * @next_cpu_batch: Counter of how many queue runs are left in the batch
327 * before switching to the next CPU.
328 */
329 int next_cpu_batch;
330
331 /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
332 unsigned long flags;
333
334 /**
335 * @sched_data: Pointer owned by the IO scheduler attached to a request
336 * queue. It's up to the IO scheduler how to use this pointer.
337 */
338 void *sched_data;
339 /**
340 * @queue: Pointer to the request queue that owns this hardware context.
341 */
342 struct request_queue *queue;
343 /** @fq: Queue of requests that need to perform a flush operation. */
344 struct blk_flush_queue *fq;
345
346 /**
347 * @driver_data: Pointer to data owned by the block driver that created
348 * this hctx
349 */
350 void *driver_data;
351
352 /**
353 * @ctx_map: Bitmap for each software queue. If bit is on, there is a
354 * pending request in that software queue.
355 */
356 struct sbitmap ctx_map;
357
358 /**
359 * @dispatch_from: Software queue to be used when no scheduler was
360 * selected.
361 */
362 struct blk_mq_ctx *dispatch_from;
363 /**
364 * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
365 * decide if the hw_queue is busy using Exponential Weighted Moving
366 * Average algorithm.
367 */
368 unsigned int dispatch_busy;
369
370 /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
371 unsigned short type;
372 /** @nr_ctx: Number of software queues. */
373 unsigned short nr_ctx;
374 /** @ctxs: Array of software queues. */
375 struct blk_mq_ctx **ctxs;
376
377 /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
378 spinlock_t dispatch_wait_lock;
379 /**
380 * @dispatch_wait: Waitqueue to put requests when there is no tag
381 * available at the moment, to wait for another try in the future.
382 */
383 wait_queue_entry_t dispatch_wait;
384
385 /**
386 * @wait_index: Index of next available dispatch_wait queue to insert
387 * requests.
388 */
389 atomic_t wait_index;
390
391 /**
392 * @tags: Tags owned by the block driver. A tag in this set is only
393 * assigned when a request is dispatched from a hardware queue.
394 */
395 struct blk_mq_tags *tags;
396 /**
397 * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
398 * scheduler associated with a request queue, a tag is assigned when
399 * that request is allocated. Else, this member is not used.
400 */
401 struct blk_mq_tags *sched_tags;
402
403 /** @run: Number of dispatched requests. */
404 unsigned long run;
405
406 /** @numa_node: NUMA node the storage adapter has been connected to. */
407 unsigned int numa_node;
408 /** @queue_num: Index of this hardware queue. */
409 unsigned int queue_num;
410
411 /**
412 * @nr_active: Number of active requests. Only used when a tag set is
413 * shared across request queues.
414 */
415 atomic_t nr_active;
416
417 /** @cpuhp_online: List to store requests when a CPU is going to die. */
418 struct hlist_node cpuhp_online;
419 /** @cpuhp_dead: List to store requests when a CPU dies. */
420 struct hlist_node cpuhp_dead;
421 /** @kobj: Kernel object for sysfs. */
422 struct kobject kobj;
423
424 #ifdef CONFIG_BLK_DEBUG_FS
425 /**
426 * @debugfs_dir: debugfs directory for this hardware queue. Named
427 * as cpu<cpu_number>.
428 */
429 struct dentry *debugfs_dir;
430 /** @sched_debugfs_dir: debugfs directory for the scheduler. */
431 struct dentry *sched_debugfs_dir;
432 #endif
433
434 /**
435 * @hctx_list: if this hctx is not in use, this is an entry in
436 * q->unused_hctx_list.
437 */
438 struct list_head hctx_list;
439
440 ANDROID_KABI_RESERVE(1);
441 };
442
443 /**
444 * struct blk_mq_queue_map - Map software queues to hardware queues
445 * @mq_map: CPU ID to hardware queue index map. This is an array
446 * with nr_cpu_ids elements. Each element has a value in the range
447 * [@queue_offset, @queue_offset + @nr_queues).
448 * @nr_queues: Number of hardware queues to map CPU IDs onto.
449 * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
450 * driver to map each hardware queue type (enum hctx_type) onto a distinct
451 * set of hardware queues.
452 */
453 struct blk_mq_queue_map {
454 unsigned int *mq_map;
455 unsigned int nr_queues;
456 unsigned int queue_offset;
457 };
458
459 /**
460 * enum hctx_type - Type of hardware queue
461 * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
462 * @HCTX_TYPE_READ: Just for READ I/O.
463 * @HCTX_TYPE_POLL: Polled I/O of any kind.
464 * @HCTX_MAX_TYPES: Number of types of hctx.
465 */
466 enum hctx_type {
467 HCTX_TYPE_DEFAULT,
468 HCTX_TYPE_READ,
469 HCTX_TYPE_POLL,
470
471 HCTX_MAX_TYPES,
472 };
473
474 /**
475 * struct blk_mq_tag_set - tag set that can be shared between request queues
476 * @ops: Pointers to functions that implement block driver behavior.
477 * @map: One or more ctx -> hctx mappings. One map exists for each
478 * hardware queue type (enum hctx_type) that the driver wishes
479 * to support. There are no restrictions on maps being of the
480 * same size, and it's perfectly legal to share maps between
481 * types.
482 * @nr_maps: Number of elements in the @map array. A number in the range
483 * [1, HCTX_MAX_TYPES].
484 * @nr_hw_queues: Number of hardware queues supported by the block driver that
485 * owns this data structure.
486 * @queue_depth: Number of tags per hardware queue, reserved tags included.
487 * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
488 * allocations.
489 * @cmd_size: Number of additional bytes to allocate per request. The block
490 * driver owns these additional bytes.
491 * @numa_node: NUMA node the storage adapter has been connected to.
492 * @timeout: Request processing timeout in jiffies.
493 * @flags: Zero or more BLK_MQ_F_* flags.
494 * @driver_data: Pointer to data owned by the block driver that created this
495 * tag set.
496 * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
497 * elements.
498 * @shared_tags:
499 * Shared set of tags. Has @nr_hw_queues elements. If set,
500 * shared by all @tags.
501 * @tag_list_lock: Serializes tag_list accesses.
502 * @tag_list: List of the request queues that use this tag set. See also
503 * request_queue.tag_set_list.
504 * @srcu: Used as a lock when the type of the request queue is blocking
505 * (BLK_MQ_F_BLOCKING).
506 */
507 struct blk_mq_tag_set {
508 const struct blk_mq_ops *ops;
509 struct blk_mq_queue_map map[HCTX_MAX_TYPES];
510 unsigned int nr_maps;
511 unsigned int nr_hw_queues;
512 unsigned int queue_depth;
513 unsigned int reserved_tags;
514 unsigned int cmd_size;
515 int numa_node;
516 unsigned int timeout;
517 unsigned int flags;
518 void *driver_data;
519
520 struct blk_mq_tags **tags;
521
522 struct blk_mq_tags *shared_tags;
523
524 struct mutex tag_list_lock;
525 struct list_head tag_list;
526 struct srcu_struct *srcu;
527
528 ANDROID_KABI_RESERVE(1);
529 };
530
531 /**
532 * struct blk_mq_queue_data - Data about a request inserted in a queue
533 *
534 * @rq: Request pointer.
535 * @last: If it is the last request in the queue.
536 */
537 struct blk_mq_queue_data {
538 struct request *rq;
539 bool last;
540 };
541
542 typedef bool (busy_tag_iter_fn)(struct request *, void *);
543
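/*
 * Illustrative sketch: busy_tag_iter_fn callbacks are passed to
 * blk_mq_tagset_busy_iter() and are invoked once per in-flight request;
 * returning false stops the iteration. my_cancel_cmd() is an assumed
 * driver helper:
 *
 *	static bool my_cancel_one(struct request *rq, void *data)
 *	{
 *		my_cancel_cmd(blk_mq_rq_to_pdu(rq));
 *		return true;	// keep iterating
 *	}
 *
 *	// e.g. during controller teardown:
 *	// blk_mq_tagset_busy_iter(&dev->tag_set, my_cancel_one, NULL);
 */
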
544 /**
545 * struct blk_mq_ops - Callback functions that implements block driver
546 * behaviour.
547 */
548 struct blk_mq_ops {
549 /**
550 * @queue_rq: Queue a new request from block IO.
551 */
552 blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
553 const struct blk_mq_queue_data *);
554
555 /**
556 * @commit_rqs: If a driver uses bd->last to judge when to submit
557 * requests to hardware, it must define this function. In case of errors
558 * that make us stop issuing further requests, this hook serves the
559 * purpose of kicking the hardware (which the last request otherwise
560 * would have done).
561 */
562 void (*commit_rqs)(struct blk_mq_hw_ctx *);
563
564 /**
565 * @queue_rqs: Queue a list of new requests. Driver is guaranteed
566 * that each request belongs to the same queue. If the driver doesn't
567 * empty the @rqlist completely, then the rest will be queued
568 * individually by the block layer upon return.
569 */
570 void (*queue_rqs)(struct request **rqlist);
571
572 /**
573 * @get_budget: Reserve budget before queueing a request; once .queue_rq
574 * has run, it is the driver's responsibility to release the
575 * reserved budget. The failure case of .get_budget must also
576 * be handled to avoid I/O deadlock.
577 */
578 int (*get_budget)(struct request_queue *);
579
580 /**
581 * @put_budget: Release the reserved budget.
582 */
583 void (*put_budget)(struct request_queue *, int);
584
585 /**
586 * @set_rq_budget_token: store rq's budget token
587 */
588 void (*set_rq_budget_token)(struct request *, int);
589 /**
590 * @get_rq_budget_token: retrieve rq's budget token
591 */
592 int (*get_rq_budget_token)(struct request *);
593
594 /**
595 * @timeout: Called on request timeout.
596 */
597 enum blk_eh_timer_return (*timeout)(struct request *);
598
599 /**
600 * @poll: Called to poll for completion of a specific tag.
601 */
602 int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
603
604 /**
605 * @complete: Mark the request as complete.
606 */
607 void (*complete)(struct request *);
608
609 /**
610 * @init_hctx: Called when the block layer side of a hardware queue has
611 * been set up, allowing the driver to allocate/init matching
612 * structures.
613 */
614 int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
615 /**
616 * @exit_hctx: Ditto for exit/teardown.
617 */
618 void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
619
620 /**
621 * @init_request: Called for every command allocated by the block layer
622 * to allow the driver to set up driver specific data.
623 *
624 * A tag greater than or equal to queue_depth is used for setting
625 * up the flush request.
626 */
627 int (*init_request)(struct blk_mq_tag_set *set, struct request *,
628 unsigned int, unsigned int);
629 /**
630 * @exit_request: Ditto for exit/teardown.
631 */
632 void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
633 unsigned int);
634
635 /**
636 * @cleanup_rq: Called before freeing a request that has not completed
637 * yet, usually to free driver-private data.
638 */
639 void (*cleanup_rq)(struct request *);
640
641 /**
642 * @busy: If set, returns whether or not this queue currently is busy.
643 */
644 bool (*busy)(struct request_queue *);
645
646 /**
647 * @map_queues: This allows drivers to specify their own queue mapping by
648 * overriding the setup-time function that builds the mq_map.
649 */
650 void (*map_queues)(struct blk_mq_tag_set *set);
651
652 #ifdef CONFIG_BLK_DEBUG_FS
653 /**
654 * @show_rq: Used by the debugfs implementation to show driver-specific
655 * information about a request.
656 */
657 void (*show_rq)(struct seq_file *m, struct request *rq);
658 #endif
659
660 ANDROID_KABI_RESERVE(1);
661 };
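
/*
 * Illustrative sketch (not a complete driver): a minimal blk_mq_ops only
 * needs .queue_rq; all other callbacks are optional. my_hw_submit() and the
 * use of queuedata as the driver-private device are assumed driver-side
 * conventions:
 *
 *	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (my_hw_submit(hctx->queue->queuedata, rq))
 *			return BLK_STS_RESOURCE;	// retried later
 *		return BLK_STS_OK;
 *	}
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 */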
662
663 enum {
664 BLK_MQ_F_SHOULD_MERGE = 1 << 0,
665 BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
666 /*
667 * Set when this device requires underlying blk-mq device for
668 * completing IO:
669 */
670 BLK_MQ_F_STACKING = 1 << 2,
671 BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
672 BLK_MQ_F_BLOCKING = 1 << 5,
673 /* Do not allow an I/O scheduler to be configured. */
674 BLK_MQ_F_NO_SCHED = 1 << 6,
675 /*
676 * Select 'none' during queue registration in case of a single hwq
677 * or shared hwqs instead of 'mq-deadline'.
678 */
679 BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
680 BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
681 BLK_MQ_F_ALLOC_POLICY_BITS = 1,
682
683 BLK_MQ_S_STOPPED = 0,
684 BLK_MQ_S_TAG_ACTIVE = 1,
685 BLK_MQ_S_SCHED_RESTART = 2,
686
687 /* hw queue is inactive after all its CPUs become offline */
688 BLK_MQ_S_INACTIVE = 3,
689
690 BLK_MQ_MAX_DEPTH = 10240,
691
692 BLK_MQ_CPU_WORK_BATCH = 8,
693 };
694 #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
695 ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
696 ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
697 #define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
698 ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
699 << BLK_MQ_F_ALLOC_POLICY_START_BIT)
700
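/*
 * Worked example: BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR) evaluates
 * to 1 << 8, and BLK_MQ_FLAG_TO_ALLOC_POLICY() recovers BLK_TAG_ALLOC_RR
 * (1) from a tag set's flags word.
 */
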
701 #define BLK_MQ_NO_HCTX_IDX (-1U)
702
703 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
704 struct lock_class_key *lkclass);
705 #define blk_mq_alloc_disk(set, queuedata) \
706 ({ \
707 static struct lock_class_key __key; \
708 \
709 __blk_mq_alloc_disk(set, queuedata, &__key); \
710 })
711 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
712 struct lock_class_key *lkclass);
713 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
714 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
715 struct request_queue *q);
716 void blk_mq_destroy_queue(struct request_queue *);
717
718 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
719 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
720 const struct blk_mq_ops *ops, unsigned int queue_depth,
721 unsigned int set_flags);
722 void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
723
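/*
 * Illustrative probe-time sketch, assuming the hypothetical my_mq_ops table
 * above and a driver-private struct my_dev with tag_set/disk members; error
 * unwinding is omitted for brevity:
 *
 *	dev->tag_set.ops = &my_mq_ops;
 *	dev->tag_set.nr_hw_queues = 1;
 *	dev->tag_set.queue_depth = 64;
 *	dev->tag_set.numa_node = NUMA_NO_NODE;
 *	dev->tag_set.cmd_size = sizeof(struct my_cmd);	// per-request PDU
 *	ret = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (ret)
 *		return ret;
 *
 *	dev->disk = blk_mq_alloc_disk(&dev->tag_set, dev);
 *	if (IS_ERR(dev->disk))
 *		return PTR_ERR(dev->disk);
 */
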
724 void blk_mq_free_request(struct request *rq);
725 int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
726 unsigned int poll_flags);
727
728 bool blk_mq_queue_inflight(struct request_queue *q);
729
730 enum {
731 /* return when out of requests */
732 BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
733 /* allocate from reserved pool */
734 BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
735 /* set RQF_PM */
736 BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
737 };
738
739 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
740 blk_mq_req_flags_t flags);
741 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
742 blk_opf_t opf, blk_mq_req_flags_t flags,
743 unsigned int hctx_idx);
744
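/*
 * Illustrative sketch: allocating a request directly and executing it
 * synchronously, as passthrough users (e.g. SCSI or NVMe ioctls) do.
 * Filling in the driver PDU is assumed to happen where indicated:
 *
 *	struct request *rq;
 *	blk_status_t status;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	// ... set up the driver PDU via blk_mq_rq_to_pdu(rq) ...
 *
 *	status = blk_execute_rq(rq, false);	// wait for completion
 *	blk_mq_free_request(rq);
 */
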
745 /*
746 * Tag address space map.
747 */
748 struct blk_mq_tags {
749 unsigned int nr_tags;
750 unsigned int nr_reserved_tags;
751 unsigned int active_queues;
752
753 struct sbitmap_queue bitmap_tags;
754 struct sbitmap_queue breserved_tags;
755
756 struct request **rqs;
757 struct request **static_rqs;
758 struct list_head page_list;
759
760 /*
761 * used to clear request reference in rqs[] before freeing one
762 * request pool
763 */
764 spinlock_t lock;
765 ANDROID_OEM_DATA(1);
766 };
767
768 static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
769 unsigned int tag)
770 {
771 if (tag < tags->nr_tags) {
772 prefetch(tags->rqs[tag]);
773 return tags->rqs[tag];
774 }
775
776 return NULL;
777 }
778
779 enum {
780 BLK_MQ_UNIQUE_TAG_BITS = 16,
781 BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
782 };
783
784 u32 blk_mq_unique_tag(struct request *rq);
785
786 static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
787 {
788 return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
789 }
790
791 static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
792 {
793 return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
794 }
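
/*
 * Illustrative sketch: a driver that only gets a 32-bit cookie back from
 * hardware can encode both hardware queue and tag with blk_mq_unique_tag()
 * at submission time and split it again on completion ('dev' is an assumed
 * driver-private structure holding the tag set):
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	...
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 *	rq = blk_mq_tag_to_rq(dev->tag_set.tags[hwq], tag);
 */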
795
796 /**
797 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
798 * @rq: target request.
799 */
800 static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
801 {
802 return READ_ONCE(rq->state);
803 }
804
805 static inline int blk_mq_request_started(struct request *rq)
806 {
807 return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
808 }
809
810 static inline int blk_mq_request_completed(struct request *rq)
811 {
812 return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
813 }
814
815 /*
816 *
817 * Set the state to complete when completing a request from inside ->queue_rq.
818 * This is used by drivers that want to ensure special complete actions that
819 * need access to the request are called on failure, e.g. by nvme for
820 * multipathing.
821 */
822 static inline void blk_mq_set_request_complete(struct request *rq)
823 {
824 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
825 }
826
827 /*
828 * Complete the request directly instead of deferring it to softirq or
829 * completing it on another CPU. Useful in preemptible context instead of an interrupt context.
830 */
831 static inline void blk_mq_complete_request_direct(struct request *rq,
832 void (*complete)(struct request *rq))
833 {
834 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
835 complete(rq);
836 }
837
838 void blk_mq_start_request(struct request *rq);
839 void blk_mq_end_request(struct request *rq, blk_status_t error);
840 void __blk_mq_end_request(struct request *rq, blk_status_t error);
841 void blk_mq_end_request_batch(struct io_comp_batch *ib);
842
843 /*
844 * Only need start/end time stamping if we have iostat or
845 * blk stats enabled, or using an IO scheduler.
846 */
847 static inline bool blk_mq_need_time_stamp(struct request *rq)
848 {
849 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
850 }
851
852 static inline bool blk_mq_is_reserved_rq(struct request *rq)
853 {
854 return rq->rq_flags & RQF_RESV;
855 }
856
857 /*
858 * Batched completions only work when there is no I/O error and no special
859 * ->end_io handler.
860 */
861 static inline bool blk_mq_add_to_batch(struct request *req,
862 struct io_comp_batch *iob, int ioerror,
863 void (*complete)(struct io_comp_batch *))
864 {
865 /*
866 * blk_mq_end_request_batch() can't end requests allocated from
867 * sched tags.
868 */
869 if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
870 (req->end_io && !blk_rq_is_passthrough(req)))
871 return false;
872
873 if (!iob->complete)
874 iob->complete = complete;
875 else if (iob->complete != complete)
876 return false;
877 iob->need_ts |= blk_mq_need_time_stamp(req);
878 rq_list_add(&iob->req_list, req);
879 return true;
880 }
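
/*
 * Illustrative sketch of the intended use from a driver's ->poll handler:
 * completions that qualify are queued on the batch, everything else falls
 * back to the regular completion path. my_rq_error() is an assumed helper
 * translating hardware status into 0 or a negative error:
 *
 *	if (!blk_mq_add_to_batch(req, iob, my_rq_error(req),
 *				 blk_mq_end_request_batch))
 *		blk_mq_complete_request(req);
 */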
881
882 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
883 void blk_mq_kick_requeue_list(struct request_queue *q);
884 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
885 void blk_mq_complete_request(struct request *rq);
886 bool blk_mq_complete_request_remote(struct request *rq);
887 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
888 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
889 void blk_mq_stop_hw_queues(struct request_queue *q);
890 void blk_mq_start_hw_queues(struct request_queue *q);
891 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
892 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
893 void blk_mq_quiesce_queue(struct request_queue *q);
894 void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
895 void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
896 void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
897 void blk_mq_unquiesce_queue(struct request_queue *q);
898 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
899 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
900 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
901 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
902 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
903 busy_tag_iter_fn *fn, void *priv);
904 void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
905 void blk_mq_freeze_queue(struct request_queue *q);
906 void blk_mq_unfreeze_queue(struct request_queue *q);
907 void blk_freeze_queue_start(struct request_queue *q);
908 void blk_mq_freeze_queue_wait(struct request_queue *q);
909 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
910 unsigned long timeout);
911
912 void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
913 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
914
915 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
916
917 unsigned int blk_mq_rq_cpu(struct request *rq);
918
919 bool __blk_should_fake_timeout(struct request_queue *q);
920 static inline bool blk_should_fake_timeout(struct request_queue *q)
921 {
922 if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
923 test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
924 return __blk_should_fake_timeout(q);
925 return false;
926 }
927
928 /**
929 * blk_mq_rq_from_pdu - cast a PDU to a request
930 * @pdu: the PDU (Protocol Data Unit) to be cast
931 *
932 * Return: request
933 *
934 * Driver command data is immediately after the request. So subtract request
935 * size to get back to the original request.
936 */
937 static inline struct request *blk_mq_rq_from_pdu(void *pdu)
938 {
939 return pdu - sizeof(struct request);
940 }
941
942 /**
943 * blk_mq_rq_to_pdu - cast a request to a PDU
944 * @rq: the request to be cast
945 *
946 * Return: pointer to the PDU
947 *
948 * Driver command data is immediately after the request. So add request to get
949 * the PDU.
950 */
951 static inline void *blk_mq_rq_to_pdu(struct request *rq)
952 {
953 return rq + 1;
954 }
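
/*
 * Illustrative sketch: with tag_set.cmd_size = sizeof(struct my_cmd), the
 * block layer allocates the driver PDU directly behind each request, so the
 * two can be converted back and forth without extra allocations (struct
 * my_cmd is an assumed driver type):
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	...
 *	struct request *rq2 = blk_mq_rq_from_pdu(cmd);	// rq2 == rq
 */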
955
956 #define queue_for_each_hw_ctx(q, hctx, i) \
957 xa_for_each(&(q)->hctx_table, (i), (hctx))
958
959 #define hctx_for_each_ctx(hctx, ctx, i) \
960 for ((i) = 0; (i) < (hctx)->nr_ctx && \
961 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
962
963 static inline void blk_mq_cleanup_rq(struct request *rq)
964 {
965 if (rq->q->mq_ops->cleanup_rq)
966 rq->q->mq_ops->cleanup_rq(rq);
967 }
968
969 static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
970 unsigned int nr_segs)
971 {
972 rq->nr_phys_segments = nr_segs;
973 rq->__data_len = bio->bi_iter.bi_size;
974 rq->bio = rq->biotail = bio;
975 rq->ioprio = bio_prio(bio);
976 }
977
978 void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
979 struct lock_class_key *key);
980
981 static inline bool rq_is_sync(struct request *rq)
982 {
983 return op_is_sync(rq->cmd_flags);
984 }
985
986 void blk_rq_init(struct request_queue *q, struct request *rq);
987 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
988 struct bio_set *bs, gfp_t gfp_mask,
989 int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
990 void blk_rq_unprep_clone(struct request *rq);
991 blk_status_t blk_insert_cloned_request(struct request *rq);
992
993 struct rq_map_data {
994 struct page **pages;
995 unsigned long offset;
996 unsigned short page_order;
997 unsigned short nr_entries;
998 bool null_mapped;
999 bool from_user;
1000 };
1001
1002 int blk_rq_map_user(struct request_queue *, struct request *,
1003 struct rq_map_data *, void __user *, unsigned long, gfp_t);
1004 int blk_rq_map_user_io(struct request *, struct rq_map_data *,
1005 void __user *, unsigned long, gfp_t, bool, int, bool, int);
1006 int blk_rq_map_user_iov(struct request_queue *, struct request *,
1007 struct rq_map_data *, const struct iov_iter *, gfp_t);
1008 int blk_rq_unmap_user(struct bio *);
1009 int blk_rq_map_kern(struct request_queue *, struct request *, void *,
1010 unsigned int, gfp_t);
1011 int blk_rq_append_bio(struct request *rq, struct bio *bio);
1012 void blk_execute_rq_nowait(struct request *rq, bool at_head);
1013 blk_status_t blk_execute_rq(struct request *rq, bool at_head);
1014 bool blk_rq_is_poll(struct request *rq);
1015
1016 struct req_iterator {
1017 struct bvec_iter iter;
1018 struct bio *bio;
1019 };
1020
1021 #define __rq_for_each_bio(_bio, rq) \
1022 if ((rq->bio)) \
1023 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
1024
1025 #define rq_for_each_segment(bvl, _rq, _iter) \
1026 __rq_for_each_bio(_iter.bio, _rq) \
1027 bio_for_each_segment(bvl, _iter.bio, _iter.iter)
1028
1029 #define rq_for_each_bvec(bvl, _rq, _iter) \
1030 __rq_for_each_bio(_iter.bio, _rq) \
1031 bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
1032
1033 #define rq_iter_last(bvec, _iter) \
1034 (_iter.bio->bi_next == NULL && \
1035 bio_iter_last(bvec, _iter.iter))
1036
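/*
 * Illustrative sketch: walking the data of a request segment by segment,
 * e.g. for a memory-backed device. my_copy() stands in for the actual
 * destination handling and is not a block layer API:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *p = kmap_local_page(bvec.bv_page) + bvec.bv_offset;
 *
 *		my_copy(p, bvec.bv_len);
 *		kunmap_local(p);
 *	}
 */
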
1037 /*
1038 * blk_rq_pos() : the current sector
1039 * blk_rq_bytes() : bytes left in the entire request
1040 * blk_rq_cur_bytes() : bytes left in the current segment
1041 * blk_rq_sectors() : sectors left in the entire request
1042 * blk_rq_cur_sectors() : sectors left in the current segment
1043 * blk_rq_stats_sectors() : sectors of the entire request used for stats
1044 */
1045 static inline sector_t blk_rq_pos(const struct request *rq)
1046 {
1047 return rq->__sector;
1048 }
1049
1050 static inline unsigned int blk_rq_bytes(const struct request *rq)
1051 {
1052 return rq->__data_len;
1053 }
1054
1055 static inline int blk_rq_cur_bytes(const struct request *rq)
1056 {
1057 if (!rq->bio)
1058 return 0;
1059 if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
1060 return rq->bio->bi_iter.bi_size;
1061 return bio_iovec(rq->bio).bv_len;
1062 }
1063
1064 static inline unsigned int blk_rq_sectors(const struct request *rq)
1065 {
1066 return blk_rq_bytes(rq) >> SECTOR_SHIFT;
1067 }
1068
1069 static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1070 {
1071 return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
1072 }
1073
1074 static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
1075 {
1076 return rq->stats_sectors;
1077 }
1078
1079 /*
1080 * Some commands like WRITE SAME have a payload or data transfer size which
1081 * is different from the size of the request. Any driver that supports such
1082 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
1083 * calculate the data transfer size.
1084 */
1085 static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1086 {
1087 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1088 return rq->special_vec.bv_len;
1089 return blk_rq_bytes(rq);
1090 }
1091
1092 /*
1093 * Return the first full biovec in the request. The caller needs to check that
1094 * the request has at least one bvec before calling this helper.
1095 */
1096 static inline struct bio_vec req_bvec(struct request *rq)
1097 {
1098 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1099 return rq->special_vec;
1100 return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
1101 }
1102
1103 static inline unsigned int blk_rq_count_bios(struct request *rq)
1104 {
1105 unsigned int nr_bios = 0;
1106 struct bio *bio;
1107
1108 __rq_for_each_bio(bio, rq)
1109 nr_bios++;
1110
1111 return nr_bios;
1112 }
1113
1114 void blk_steal_bios(struct bio_list *list, struct request *rq);
1115
1116 /*
1117 * Request completion related functions.
1118 *
1119 * blk_update_request() completes given number of bytes and updates
1120 * the request without completing it.
1121 */
1122 bool blk_update_request(struct request *rq, blk_status_t error,
1123 unsigned int nr_bytes);
1124 void blk_abort_request(struct request *);
1125
1126 /*
1127 * Number of physical segments as sent to the device.
1128 *
1129 * Normally this is the number of discontiguous data segments sent by the
1130 * submitter. But for data-less commands like discard we might have no
1131 * actual data segments submitted, but the driver might have to add its
1132 * own special payload. In that case we still return 1 here so that this
1133 * special payload will be mapped.
1134 */
1135 static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1136 {
1137 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1138 return 1;
1139 return rq->nr_phys_segments;
1140 }
1141
1142 /*
1143 * Number of discard segments (or ranges) the driver needs to fill in.
1144 * Each discard bio merged into a request is counted as one segment.
1145 */
1146 static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1147 {
1148 return max_t(unsigned short, rq->nr_phys_segments, 1);
1149 }
1150
1151 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
1152 struct scatterlist *sglist, struct scatterlist **last_sg);
1153 static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
1154 struct scatterlist *sglist)
1155 {
1156 struct scatterlist *last_sg = NULL;
1157
1158 return __blk_rq_map_sg(q, rq, sglist, &last_sg);
1159 }
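
/*
 * Illustrative sketch: DMA-capable drivers typically map the request onto a
 * preallocated scatterlist from ->queue_rq before handing it to hardware;
 * cmd->sgl, its size, and 'dev' are assumed driver-side state:
 *
 *	int nents = blk_rq_map_sg(rq->q, rq, cmd->sgl);
 *
 *	if (nents)
 *		nents = dma_map_sg(dev, cmd->sgl, nents, rq_dma_dir(rq));
 */
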
1160 void blk_dump_rq_flags(struct request *, char *);
1161
1162 #ifdef CONFIG_BLK_DEV_ZONED
1163 static inline unsigned int blk_rq_zone_no(struct request *rq)
1164 {
1165 return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
1166 }
1167
1168 static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
1169 {
1170 return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
1171 }
1172
1173 /**
1174 * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
1175 * @rq: Request to examine.
1176 *
1177 * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
1178 */
1179 static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
1180 {
1181 return op_needs_zoned_write_locking(req_op(rq)) &&
1182 blk_rq_zone_is_seq(rq);
1183 }
1184
1185 bool blk_req_needs_zone_write_lock(struct request *rq);
1186 bool blk_req_zone_write_trylock(struct request *rq);
1187 void __blk_req_zone_write_lock(struct request *rq);
1188 void __blk_req_zone_write_unlock(struct request *rq);
1189
1190 static inline void blk_req_zone_write_lock(struct request *rq)
1191 {
1192 if (blk_req_needs_zone_write_lock(rq))
1193 __blk_req_zone_write_lock(rq);
1194 }
1195
1196 static inline void blk_req_zone_write_unlock(struct request *rq)
1197 {
1198 if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
1199 __blk_req_zone_write_unlock(rq);
1200 }
1201
1202 static inline bool blk_req_zone_is_write_locked(struct request *rq)
1203 {
1204 return rq->q->disk->seq_zones_wlock &&
1205 test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
1206 }
1207
1208 static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1209 {
1210 if (!blk_req_needs_zone_write_lock(rq))
1211 return true;
1212 return !blk_req_zone_is_write_locked(rq);
1213 }
1214 #else /* CONFIG_BLK_DEV_ZONED */
1215 static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
1216 {
1217 return false;
1218 }
1219
1220 static inline bool blk_req_needs_zone_write_lock(struct request *rq)
1221 {
1222 return false;
1223 }
1224
1225 static inline void blk_req_zone_write_lock(struct request *rq)
1226 {
1227 }
1228
1229 static inline void blk_req_zone_write_unlock(struct request *rq)
1230 {
1231 }
1232 static inline bool blk_req_zone_is_write_locked(struct request *rq)
1233 {
1234 return false;
1235 }
1236
1237 static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
1238 {
1239 return true;
1240 }
1241 #endif /* CONFIG_BLK_DEV_ZONED */
1242
1243 #endif /* BLK_MQ_H */
1244