Lines Matching refs:nvmeq

93 static void nvme_process_cq(struct nvme_queue *nvmeq);
158 struct nvme_queue *nvmeq; member
195 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_admin_init_hctx() local
199 WARN_ON(nvmeq->tags); in nvme_admin_init_hctx()
201 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
202 nvmeq->tags = &dev->admin_tagset.tags[0]; in nvme_admin_init_hctx()
208 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_admin_exit_hctx() local
210 nvmeq->tags = NULL; in nvme_admin_exit_hctx()
219 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_admin_init_request() local
221 BUG_ON(!nvmeq); in nvme_admin_init_request()
222 cmd->nvmeq = nvmeq; in nvme_admin_init_request()
230 struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; in nvme_init_hctx() local
232 if (!nvmeq->tags) in nvme_init_hctx()
233 nvmeq->tags = &dev->tagset.tags[hctx_idx]; in nvme_init_hctx()
236 hctx->driver_data = nvmeq; in nvme_init_hctx()
246 struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; in nvme_init_request() local
248 BUG_ON(!nvmeq); in nvme_init_request()
249 cmd->nvmeq = nvmeq; in nvme_init_request()
281 static void special_completion(struct nvme_queue *nvmeq, void *ctx, in special_completion() argument
287 dev_warn(nvmeq->q_dmadev, in special_completion()
293 dev_warn(nvmeq->q_dmadev, in special_completion()
298 dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx); in special_completion()
313 static void async_req_completion(struct nvme_queue *nvmeq, void *ctx, in async_req_completion() argument
320 ++nvmeq->dev->event_limit; in async_req_completion()
326 dev_info(nvmeq->q_dmadev, "rescanning\n"); in async_req_completion()
327 schedule_work(&nvmeq->dev->scan_work); in async_req_completion()
329 dev_warn(nvmeq->q_dmadev, "async event result %08x\n", result); in async_req_completion()
333 static void abort_completion(struct nvme_queue *nvmeq, void *ctx, in abort_completion() argument
343 dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result); in abort_completion()
344 ++nvmeq->dev->abort_limit; in abort_completion()
347 static void async_completion(struct nvme_queue *nvmeq, void *ctx, in async_completion() argument
357 static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq, in get_cmd_from_tag() argument
360 struct request *req = blk_mq_tag_to_rq(*nvmeq->tags, tag); in get_cmd_from_tag()
368 static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag, in nvme_finish_cmd() argument
371 struct nvme_cmd_info *cmd = get_cmd_from_tag(nvmeq, tag); in nvme_finish_cmd()
373 if (tag >= nvmeq->q_depth) { in nvme_finish_cmd()
392 static void __nvme_submit_cmd(struct nvme_queue *nvmeq, in __nvme_submit_cmd() argument
395 u16 tail = nvmeq->sq_tail; in __nvme_submit_cmd()
397 if (nvmeq->sq_cmds_io) in __nvme_submit_cmd()
398 memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd)); in __nvme_submit_cmd()
400 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); in __nvme_submit_cmd()
402 if (++tail == nvmeq->q_depth) in __nvme_submit_cmd()
404 writel(tail, nvmeq->q_db); in __nvme_submit_cmd()
405 nvmeq->sq_tail = tail; in __nvme_submit_cmd()
408 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) in nvme_submit_cmd() argument
411 spin_lock_irqsave(&nvmeq->q_lock, flags); in nvme_submit_cmd()
412 __nvme_submit_cmd(nvmeq, cmd); in nvme_submit_cmd()
413 spin_unlock_irqrestore(&nvmeq->q_lock, flags); in nvme_submit_cmd()
589 static void req_completion(struct nvme_queue *nvmeq, void *ctx, in req_completion() argument
629 dev_warn(nvmeq->dev->dev, in req_completion()
635 dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents, in req_completion()
640 dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1, in req_completion()
644 nvme_free_iod(nvmeq->dev, iod); in req_completion()
730 static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req, in nvme_submit_priv() argument
742 __nvme_submit_cmd(nvmeq, &cmnd); in nvme_submit_priv()
750 static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_discard() argument
769 __nvme_submit_cmd(nvmeq, &cmnd); in nvme_submit_discard()
772 static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_flush() argument
782 __nvme_submit_cmd(nvmeq, &cmnd); in nvme_submit_flush()
785 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod, in nvme_submit_iod() argument
833 __nvme_submit_cmd(nvmeq, &cmnd); in nvme_submit_iod()
845 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_queue_rq() local
846 struct nvme_dev *dev = nvmeq->dev; in nvme_queue_rq()
890 if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir)) in nvme_queue_rq()
916 if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) { in nvme_queue_rq()
925 spin_lock_irq(&nvmeq->q_lock); in nvme_queue_rq()
927 nvme_submit_priv(nvmeq, req, iod); in nvme_queue_rq()
929 nvme_submit_discard(nvmeq, ns, req, iod); in nvme_queue_rq()
931 nvme_submit_flush(nvmeq, ns, req->tag); in nvme_queue_rq()
933 nvme_submit_iod(nvmeq, iod, ns); in nvme_queue_rq()
935 nvme_process_cq(nvmeq); in nvme_queue_rq()
936 spin_unlock_irq(&nvmeq->q_lock); in nvme_queue_rq()
947 static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) in __nvme_process_cq() argument
951 head = nvmeq->cq_head; in __nvme_process_cq()
952 phase = nvmeq->cq_phase; in __nvme_process_cq()
957 struct nvme_completion cqe = nvmeq->cqes[head]; in __nvme_process_cq()
960 nvmeq->sq_head = le16_to_cpu(cqe.sq_head); in __nvme_process_cq()
961 if (++head == nvmeq->q_depth) { in __nvme_process_cq()
967 ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn); in __nvme_process_cq()
968 fn(nvmeq, ctx, &cqe); in __nvme_process_cq()
977 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) in __nvme_process_cq()
980 if (likely(nvmeq->cq_vector >= 0)) in __nvme_process_cq()
981 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in __nvme_process_cq()
982 nvmeq->cq_head = head; in __nvme_process_cq()
983 nvmeq->cq_phase = phase; in __nvme_process_cq()
985 nvmeq->cqe_seen = 1; in __nvme_process_cq()
988 static void nvme_process_cq(struct nvme_queue *nvmeq) in nvme_process_cq() argument
990 __nvme_process_cq(nvmeq, NULL); in nvme_process_cq()
996 struct nvme_queue *nvmeq = data; in nvme_irq() local
997 spin_lock(&nvmeq->q_lock); in nvme_irq()
998 nvme_process_cq(nvmeq); in nvme_irq()
999 result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE; in nvme_irq()
1000 nvmeq->cqe_seen = 0; in nvme_irq()
1001 spin_unlock(&nvmeq->q_lock); in nvme_irq()
1007 struct nvme_queue *nvmeq = data; in nvme_irq_check() local
1008 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; in nvme_irq_check()
1009 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase) in nvme_irq_check()
1016 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_poll() local
1018 if ((le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) == in nvme_poll()
1019 nvmeq->cq_phase) { in nvme_poll()
1020 spin_lock_irq(&nvmeq->q_lock); in nvme_poll()
1021 __nvme_process_cq(nvmeq, &tag); in nvme_poll()
1022 spin_unlock_irq(&nvmeq->q_lock); in nvme_poll()
1092 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_submit_async_admin_req() local
1110 __nvme_submit_cmd(nvmeq, &c); in nvme_submit_async_admin_req()
1118 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_submit_admin_async_cmd() local
1134 nvme_submit_cmd(nvmeq, cmd); in nvme_submit_admin_async_cmd()
1150 struct nvme_queue *nvmeq) in adapter_alloc_cq() argument
1161 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); in adapter_alloc_cq()
1163 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1165 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); in adapter_alloc_cq()
1171 struct nvme_queue *nvmeq) in adapter_alloc_sq() argument
1182 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); in adapter_alloc_sq()
1184 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
1303 struct nvme_queue *nvmeq = cmd_rq->nvmeq; in nvme_abort_req() local
1304 struct nvme_dev *dev = nvmeq->dev; in nvme_abort_req()
1309 if (!nvmeq->qid || cmd_rq->aborted) { in nvme_abort_req()
1314 req->tag, nvmeq->qid); in nvme_abort_req()
1334 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); in nvme_abort_req()
1340 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag, in nvme_abort_req()
1341 nvmeq->qid); in nvme_abort_req()
1347 struct nvme_queue *nvmeq = data; in nvme_cancel_queue_ios() local
1367 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", in nvme_cancel_queue_ios()
1368 req->tag, nvmeq->qid); in nvme_cancel_queue_ios()
1370 fn(nvmeq, ctx, &cqe); in nvme_cancel_queue_ios()
1376 struct nvme_queue *nvmeq = cmd->nvmeq; in nvme_timeout() local
1378 dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, in nvme_timeout()
1379 nvmeq->qid); in nvme_timeout()
1380 spin_lock_irq(&nvmeq->q_lock); in nvme_timeout()
1382 spin_unlock_irq(&nvmeq->q_lock); in nvme_timeout()
1392 static void nvme_free_queue(struct nvme_queue *nvmeq) in nvme_free_queue() argument
1394 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), in nvme_free_queue()
1395 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue()
1396 if (nvmeq->sq_cmds) in nvme_free_queue()
1397 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), in nvme_free_queue()
1398 nvmeq->sq_cmds, nvmeq->sq_dma_addr); in nvme_free_queue()
1399 kfree(nvmeq); in nvme_free_queue()
1407 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_free_queues() local
1410 nvme_free_queue(nvmeq); in nvme_free_queues()
1418 static int nvme_suspend_queue(struct nvme_queue *nvmeq) in nvme_suspend_queue() argument
1422 spin_lock_irq(&nvmeq->q_lock); in nvme_suspend_queue()
1423 if (nvmeq->cq_vector == -1) { in nvme_suspend_queue()
1424 spin_unlock_irq(&nvmeq->q_lock); in nvme_suspend_queue()
1427 vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; in nvme_suspend_queue()
1428 nvmeq->dev->online_queues--; in nvme_suspend_queue()
1429 nvmeq->cq_vector = -1; in nvme_suspend_queue()
1430 spin_unlock_irq(&nvmeq->q_lock); in nvme_suspend_queue()
1432 if (!nvmeq->qid && nvmeq->dev->admin_q) in nvme_suspend_queue()
1433 blk_mq_freeze_queue_start(nvmeq->dev->admin_q); in nvme_suspend_queue()
1436 free_irq(vector, nvmeq); in nvme_suspend_queue()
1441 static void nvme_clear_queue(struct nvme_queue *nvmeq) in nvme_clear_queue() argument
1443 spin_lock_irq(&nvmeq->q_lock); in nvme_clear_queue()
1444 if (nvmeq->tags && *nvmeq->tags) in nvme_clear_queue()
1445 blk_mq_all_tag_busy_iter(*nvmeq->tags, nvme_cancel_queue_ios, nvmeq); in nvme_clear_queue()
1446 spin_unlock_irq(&nvmeq->q_lock); in nvme_clear_queue()
1451 struct nvme_queue *nvmeq = dev->queues[qid]; in nvme_disable_queue() local
1453 if (!nvmeq) in nvme_disable_queue()
1455 if (nvme_suspend_queue(nvmeq)) in nvme_disable_queue()
1465 spin_lock_irq(&nvmeq->q_lock); in nvme_disable_queue()
1466 nvme_process_cq(nvmeq); in nvme_disable_queue()
1467 spin_unlock_irq(&nvmeq->q_lock); in nvme_disable_queue()
1493 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, in nvme_alloc_sq_cmds() argument
1499 nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; in nvme_alloc_sq_cmds()
1500 nvmeq->sq_cmds_io = dev->cmb + offset; in nvme_alloc_sq_cmds()
1502 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), in nvme_alloc_sq_cmds()
1503 &nvmeq->sq_dma_addr, GFP_KERNEL); in nvme_alloc_sq_cmds()
1504 if (!nvmeq->sq_cmds) in nvme_alloc_sq_cmds()
1514 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); in nvme_alloc_queue() local
1515 if (!nvmeq) in nvme_alloc_queue()
1518 nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), in nvme_alloc_queue()
1519 &nvmeq->cq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1520 if (!nvmeq->cqes) in nvme_alloc_queue()
1523 if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth)) in nvme_alloc_queue()
1526 nvmeq->q_dmadev = dev->dev; in nvme_alloc_queue()
1527 nvmeq->dev = dev; in nvme_alloc_queue()
1528 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d", in nvme_alloc_queue()
1530 spin_lock_init(&nvmeq->q_lock); in nvme_alloc_queue()
1531 nvmeq->cq_head = 0; in nvme_alloc_queue()
1532 nvmeq->cq_phase = 1; in nvme_alloc_queue()
1533 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
1534 nvmeq->q_depth = depth; in nvme_alloc_queue()
1535 nvmeq->qid = qid; in nvme_alloc_queue()
1536 nvmeq->cq_vector = -1; in nvme_alloc_queue()
1537 dev->queues[qid] = nvmeq; in nvme_alloc_queue()
1543 return nvmeq; in nvme_alloc_queue()
1546 dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes, in nvme_alloc_queue()
1547 nvmeq->cq_dma_addr); in nvme_alloc_queue()
1549 kfree(nvmeq); in nvme_alloc_queue()
1553 static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, in queue_request_irq() argument
1557 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, in queue_request_irq()
1559 name, nvmeq); in queue_request_irq()
1560 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, in queue_request_irq()
1561 IRQF_SHARED, name, nvmeq); in queue_request_irq()
1564 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) in nvme_init_queue() argument
1566 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
1568 spin_lock_irq(&nvmeq->q_lock); in nvme_init_queue()
1569 nvmeq->sq_tail = 0; in nvme_init_queue()
1570 nvmeq->cq_head = 0; in nvme_init_queue()
1571 nvmeq->cq_phase = 1; in nvme_init_queue()
1572 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
1573 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); in nvme_init_queue()
1575 spin_unlock_irq(&nvmeq->q_lock); in nvme_init_queue()
1578 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) in nvme_create_queue() argument
1580 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
1583 nvmeq->cq_vector = qid - 1; in nvme_create_queue()
1584 result = adapter_alloc_cq(dev, qid, nvmeq); in nvme_create_queue()
1588 result = adapter_alloc_sq(dev, qid, nvmeq); in nvme_create_queue()
1592 nvme_init_queue(nvmeq, qid); in nvme_create_queue()
1593 result = queue_request_irq(dev, nvmeq, nvmeq->irqname); in nvme_create_queue()
1605 nvmeq->cq_vector = -1; in nvme_create_queue()
1747 struct nvme_queue *nvmeq; in nvme_configure_admin_queue() local
1774 nvmeq = dev->queues[0]; in nvme_configure_admin_queue()
1775 if (!nvmeq) { in nvme_configure_admin_queue()
1776 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); in nvme_configure_admin_queue()
1777 if (!nvmeq) in nvme_configure_admin_queue()
1781 aqa = nvmeq->q_depth - 1; in nvme_configure_admin_queue()
1792 lo_hi_writeq(nvmeq->sq_dma_addr, &dev->bar->asq); in nvme_configure_admin_queue()
1793 lo_hi_writeq(nvmeq->cq_dma_addr, &dev->bar->acq); in nvme_configure_admin_queue()
1799 nvmeq->cq_vector = 0; in nvme_configure_admin_queue()
1800 nvme_init_queue(nvmeq, 0); in nvme_configure_admin_queue()
1801 result = queue_request_irq(dev, nvmeq, nvmeq->irqname); in nvme_configure_admin_queue()
1803 nvmeq->cq_vector = -1; in nvme_configure_admin_queue()
2235 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_kthread() local
2236 if (!nvmeq) in nvme_kthread()
2238 spin_lock_irq(&nvmeq->q_lock); in nvme_kthread()
2239 nvme_process_cq(nvmeq); in nvme_kthread()
2246 spin_unlock_irq(&nvmeq->q_lock); in nvme_kthread()
2595 struct nvme_queue *nvmeq; in nvme_set_irq_hints() local
2599 nvmeq = dev->queues[i]; in nvme_set_irq_hints()
2601 if (!nvmeq->tags || !(*nvmeq->tags)) in nvme_set_irq_hints()
2604 irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, in nvme_set_irq_hints()
2605 blk_mq_tags_cpumask(*nvmeq->tags)); in nvme_set_irq_hints()
2810 static void nvme_del_queue_end(struct nvme_queue *nvmeq) in nvme_del_queue_end() argument
2812 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; in nvme_del_queue_end()
2815 spin_lock_irq(&nvmeq->q_lock); in nvme_del_queue_end()
2816 nvme_process_cq(nvmeq); in nvme_del_queue_end()
2817 spin_unlock_irq(&nvmeq->q_lock); in nvme_del_queue_end()
2820 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, in adapter_async_del_queue() argument
2827 c.delete_queue.qid = cpu_to_le16(nvmeq->qid); in adapter_async_del_queue()
2829 init_kthread_work(&nvmeq->cmdinfo.work, fn); in adapter_async_del_queue()
2830 return nvme_submit_admin_async_cmd(nvmeq->dev, &c, &nvmeq->cmdinfo, in adapter_async_del_queue()
2836 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, in nvme_del_cq_work_handler() local
2838 nvme_del_queue_end(nvmeq); in nvme_del_cq_work_handler()
2841 static int nvme_delete_cq(struct nvme_queue *nvmeq) in nvme_delete_cq() argument
2843 return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq, in nvme_delete_cq()
2849 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, in nvme_del_sq_work_handler() local
2851 int status = nvmeq->cmdinfo.status; in nvme_del_sq_work_handler()
2854 status = nvme_delete_cq(nvmeq); in nvme_del_sq_work_handler()
2856 nvme_del_queue_end(nvmeq); in nvme_del_sq_work_handler()
2859 static int nvme_delete_sq(struct nvme_queue *nvmeq) in nvme_delete_sq() argument
2861 return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq, in nvme_delete_sq()
2867 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, in nvme_del_queue_start() local
2869 if (nvme_delete_sq(nvmeq)) in nvme_del_queue_start()
2870 nvme_del_queue_end(nvmeq); in nvme_del_queue_start()
2893 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_disable_io_queues() local
2895 if (nvme_suspend_queue(nvmeq)) in nvme_disable_io_queues()
2897 nvmeq->cmdinfo.ctx = nvme_get_dq(&dq); in nvme_disable_io_queues()
2898 nvmeq->cmdinfo.worker = dq.worker; in nvme_disable_io_queues()
2899 init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start); in nvme_disable_io_queues()
2900 queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work); in nvme_disable_io_queues()
2968 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_dev_shutdown() local
2969 nvme_suspend_queue(nvmeq); in nvme_dev_shutdown()
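
Reading aid: the densest clusters of hits above are the fast-path helpers. Below is a minimal, hedged sketch of the submission side that the __nvme_submit_cmd()/nvme_submit_cmd() matches describe: copy the 64-byte command into the next submission-queue slot (host memory or the controller memory buffer), advance the tail with wrap-around, and ring the SQ tail doorbell, all under q_lock. The names nvme_queue_sketch and sketch_submit_cmd, and the db_stride field folded into the queue struct, are illustrative only; the real driver keeps this state split across struct nvme_queue and struct nvme_dev.

#include <asm/byteorder.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/nvme.h>         /* struct nvme_command / struct nvme_completion */
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

/* Reduced queue state: only the fields the matches above actually touch. */
struct nvme_queue_sketch {
        spinlock_t q_lock;
        struct nvme_command *sq_cmds;             /* SQ in host memory ... */
        struct nvme_command __iomem *sq_cmds_io;  /* ... or in the controller memory buffer */
        volatile struct nvme_completion *cqes;    /* completion queue entries */
        u32 __iomem *q_db;                        /* SQ tail doorbell */
        u32 db_stride;                            /* CQ head doorbell is q_db + db_stride */
        u16 sq_tail;
        u16 cq_head;
        u16 q_depth;
        u8 cq_phase;
};

static void sketch_submit_cmd(struct nvme_queue_sketch *nvmeq,
                              struct nvme_command *cmd)
{
        unsigned long flags;
        u16 tail;

        spin_lock_irqsave(&nvmeq->q_lock, flags);
        tail = nvmeq->sq_tail;

        /* Queues placed in the controller memory buffer need an MMIO copy. */
        if (nvmeq->sq_cmds_io)
                memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
        else
                memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));

        /* Advance the tail with wrap-around, then tell the controller. */
        if (++tail == nvmeq->q_depth)
                tail = 0;
        writel(tail, nvmeq->q_db);
        nvmeq->sq_tail = tail;

        spin_unlock_irqrestore(&nvmeq->q_lock, flags);
}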
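
The completion side, reflected in the __nvme_process_cq()/nvme_process_cq() matches, walks the CQ ring under the same lock, using the phase bit in each entry's status word to tell new entries from stale ones, and rings the CQ head doorbell only when something was consumed. Again a sketch against the reduced struct above; request lookup and per-command callbacks (blk_mq_tag_to_rq() and the cmd_info fn pointers in the real driver) are stubbed out.

/* Stub: the real driver maps cqe->command_id back to a struct request and
 * runs the per-command completion callback. */
static void sketch_complete_rq(struct nvme_queue_sketch *nvmeq,
                               struct nvme_completion *cqe)
{
}

static void sketch_process_cq(struct nvme_queue_sketch *nvmeq)
{
        u16 head = nvmeq->cq_head;
        u8 phase = nvmeq->cq_phase;

        /* Consume entries until the phase bit stops matching our expectation. */
        while ((le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase) {
                struct nvme_completion cqe = nvmeq->cqes[head];

                if (++head == nvmeq->q_depth) {
                        head = 0;
                        phase = !phase;   /* wrapped: expect the opposite phase */
                }
                sketch_complete_rq(nvmeq, &cqe);
        }

        /* Nothing consumed: leave the doorbell alone. */
        if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                return;

        /* The CQ head doorbell sits db_stride dwords past the SQ tail doorbell. */
        writel(head, nvmeq->q_db + nvmeq->db_stride);
        nvmeq->cq_head = head;
        nvmeq->cq_phase = phase;
}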
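
Finally, the interrupt entry points among the matches (nvme_irq(), nvme_irq_check()) are thin wrappers around that scan: the primary handler only peeks at the phase bit of the entry at cq_head, and the locked scan runs in the handler proper. The sketch below simplifies the return-value bookkeeping; the real handler tracks nvmeq->cqe_seen so it can report IRQ_NONE for spurious interrupts.

static irqreturn_t sketch_irq(int irq, void *data)
{
        struct nvme_queue_sketch *nvmeq = data;

        spin_lock(&nvmeq->q_lock);
        sketch_process_cq(nvmeq);
        spin_unlock(&nvmeq->q_lock);
        return IRQ_HANDLED;     /* simplified: no cqe_seen bookkeeping here */
}

/* Primary handler of a threaded pair: wake the thread only if the entry at
 * cq_head carries the phase we are waiting for. */
static irqreturn_t sketch_irq_check(int irq, void *data)
{
        struct nvme_queue_sketch *nvmeq = data;
        struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];

        if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
                return IRQ_NONE;
        return IRQ_WAKE_THREAD;
}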