Lines Matching refs:nvmeq
228 struct nvme_queue *nvmeq; member
286 struct nvme_queue *nvmeq, int qid) in nvme_dbbuf_init() argument
291 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
292 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
293 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
294 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
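The four assignments above wire a queue into the shared shadow-doorbell (dbbuf) arrays. A minimal compilable sketch of the indexing the sq_idx()/cq_idx() calls are used for here, assuming the interleaved per-queue SQ/CQ slot layout that mirrors the NVMe doorbell registers (one SQ slot and one CQ slot per qid, scaled by the doorbell stride); the helper bodies below are illustrative, not copied from the driver:

#include <stdio.h>

/* Shadow-doorbell indexing model: queue `qid` gets an SQ slot and a CQ slot,
 * interleaved and scaled by the doorbell stride (CAP.DSTRD). */
static unsigned int sq_idx(unsigned int qid, unsigned int stride)
{
	return qid * 2 * stride;
}

static unsigned int cq_idx(unsigned int qid, unsigned int stride)
{
	return (qid * 2 + 1) * stride;
}

int main(void)
{
	unsigned int stride = 1;	/* db_stride of one dword (CAP.DSTRD == 0) */

	for (unsigned int qid = 0; qid <= 2; qid++)
		printf("qid %u -> dbbuf_dbs[%u] (SQ), dbbuf_dbs[%u] (CQ)\n",
		       qid, sq_idx(qid, stride), cq_idx(qid, stride));
	return 0;
}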
297 static void nvme_dbbuf_free(struct nvme_queue *nvmeq) in nvme_dbbuf_free() argument
299 if (!nvmeq->qid) in nvme_dbbuf_free()
302 nvmeq->dbbuf_sq_db = NULL; in nvme_dbbuf_free()
303 nvmeq->dbbuf_cq_db = NULL; in nvme_dbbuf_free()
304 nvmeq->dbbuf_sq_ei = NULL; in nvme_dbbuf_free()
305 nvmeq->dbbuf_cq_ei = NULL; in nvme_dbbuf_free()
402 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_admin_init_hctx() local
407 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
415 struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; in nvme_init_hctx() local
418 hctx->driver_data = nvmeq; in nvme_init_hctx()
428 struct nvme_queue *nvmeq = &dev->queues[queue_idx]; in nvme_init_request() local
430 BUG_ON(!nvmeq); in nvme_init_request()
431 iod->nvmeq = nvmeq; in nvme_init_request()
480 static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) in nvme_write_sq_db() argument
483 u16 next_tail = nvmeq->sq_tail + 1; in nvme_write_sq_db()
485 if (next_tail == nvmeq->q_depth) in nvme_write_sq_db()
487 if (next_tail != nvmeq->last_sq_tail) in nvme_write_sq_db()
491 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, in nvme_write_sq_db()
492 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) in nvme_write_sq_db()
493 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_write_sq_db()
494 nvmeq->last_sq_tail = nvmeq->sq_tail; in nvme_write_sq_db()
503 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, in nvme_submit_cmd() argument
506 spin_lock(&nvmeq->sq_lock); in nvme_submit_cmd()
507 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), in nvme_submit_cmd()
509 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_cmd()
510 nvmeq->sq_tail = 0; in nvme_submit_cmd()
511 nvme_write_sq_db(nvmeq, write_sq); in nvme_submit_cmd()
512 spin_unlock(&nvmeq->sq_lock); in nvme_submit_cmd()
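nvme_submit_cmd() and nvme_write_sq_db() above form a ring-buffer producer with a deferred doorbell: the command is copied at sq_tail, the tail wraps at q_depth, and the doorbell is written only when the batch ends or when deferring further would let the tail catch up with the last value the device was told about. A compilable userspace model of that control flow, a sketch only: the shadow-doorbell check is dropped, the MMIO writel() is modeled as a plain store, and the struct and field names are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cmd { uint8_t bytes[64]; };	/* 64-byte submission queue entry */

struct sq_model {
	struct cmd *cmds;		/* sq_cmds */
	uint16_t q_depth;
	uint16_t sq_tail;
	uint16_t last_sq_tail;		/* tail value the device last saw */
	uint16_t db_shadow;		/* stands in for the q_db MMIO register */
};

/* Ring the SQ doorbell only at the end of a batch, or when the tail would
 * otherwise wrap all the way around to the last doorbelled value. */
static void write_sq_db(struct sq_model *q, int write_sq)
{
	if (!write_sq) {
		uint16_t next_tail = q->sq_tail + 1;

		if (next_tail == q->q_depth)
			next_tail = 0;
		if (next_tail != q->last_sq_tail)
			return;			/* defer: more commands coming */
	}
	q->db_shadow = q->sq_tail;		/* writel(sq_tail, q_db) */
	q->last_sq_tail = q->sq_tail;
}

/* Copy the command at the tail, advance with wrap, maybe ring the doorbell. */
static void submit_cmd(struct sq_model *q, const struct cmd *c, int last)
{
	memcpy(&q->cmds[q->sq_tail], c, sizeof(*c));
	if (++q->sq_tail == q->q_depth)
		q->sq_tail = 0;
	write_sq_db(q, last);
}

int main(void)
{
	struct cmd ring[4], c = { { 0 } };
	struct sq_model q = { .cmds = ring, .q_depth = 4 };

	submit_cmd(&q, &c, 0);	/* batched: doorbell write deferred */
	submit_cmd(&q, &c, 1);	/* last of the batch: doorbell written */
	printf("sq_tail=%u last_sq_tail=%u doorbell=%u\n",
	       (unsigned)q.sq_tail, (unsigned)q.last_sq_tail, (unsigned)q.db_shadow);
	return 0;
}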
517 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_commit_rqs() local
519 spin_lock(&nvmeq->sq_lock); in nvme_commit_rqs()
520 if (nvmeq->sq_tail != nvmeq->last_sq_tail) in nvme_commit_rqs()
521 nvme_write_sq_db(nvmeq, true); in nvme_commit_rqs()
522 spin_unlock(&nvmeq->sq_lock); in nvme_commit_rqs()
541 if (!iod->nvmeq->qid) in nvme_pci_use_sgls()
859 if (iod->nvmeq->qid && sgl_threshold && in nvme_map_data()
920 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_queue_rq() local
921 struct nvme_dev *dev = nvmeq->dev; in nvme_queue_rq()
935 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) in nvme_queue_rq()
955 nvme_submit_cmd(nvmeq, cmnd, bd->last); in nvme_queue_rq()
967 struct nvme_dev *dev = iod->nvmeq->dev; in nvme_pci_complete_rq()
978 static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) in nvme_cqe_pending() argument
980 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; in nvme_cqe_pending()
982 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; in nvme_cqe_pending()
985 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) in nvme_ring_cq_doorbell() argument
987 u16 head = nvmeq->cq_head; in nvme_ring_cq_doorbell()
989 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, in nvme_ring_cq_doorbell()
990 nvmeq->dbbuf_cq_ei)) in nvme_ring_cq_doorbell()
991 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_ring_cq_doorbell()
994 static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) in nvme_queue_tagset() argument
996 if (!nvmeq->qid) in nvme_queue_tagset()
997 return nvmeq->dev->admin_tagset.tags[0]; in nvme_queue_tagset()
998 return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; in nvme_queue_tagset()
1001 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) in nvme_handle_cqe() argument
1003 struct nvme_completion *cqe = &nvmeq->cqes[idx]; in nvme_handle_cqe()
1013 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { in nvme_handle_cqe()
1014 nvme_complete_async_event(&nvmeq->dev->ctrl, in nvme_handle_cqe()
1019 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id); in nvme_handle_cqe()
1021 dev_warn(nvmeq->dev->ctrl.device, in nvme_handle_cqe()
1027 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); in nvme_handle_cqe()
1032 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) in nvme_update_cq_head() argument
1034 u32 tmp = nvmeq->cq_head + 1; in nvme_update_cq_head()
1036 if (tmp == nvmeq->q_depth) { in nvme_update_cq_head()
1037 nvmeq->cq_head = 0; in nvme_update_cq_head()
1038 nvmeq->cq_phase ^= 1; in nvme_update_cq_head()
1040 nvmeq->cq_head = tmp; in nvme_update_cq_head()
1044 static inline int nvme_process_cq(struct nvme_queue *nvmeq) in nvme_process_cq() argument
1048 while (nvme_cqe_pending(nvmeq)) { in nvme_process_cq()
1055 nvme_handle_cqe(nvmeq, nvmeq->cq_head); in nvme_process_cq()
1056 nvme_update_cq_head(nvmeq); in nvme_process_cq()
1060 nvme_ring_cq_doorbell(nvmeq); in nvme_process_cq()
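The completion side above hinges on the phase tag: nvme_cqe_pending() treats a CQE as new when bit 0 of its status matches the queue's current phase, and nvme_update_cq_head() flips the phase each time cq_head wraps, so entries left over from the previous lap read as stale. A self-contained model of that consumer loop, with per-command handling reduced to a callback and the doorbell ring left as a comment; names are illustrative:

#include <stdint.h>
#include <stdio.h>

struct cqe { uint16_t status; };	/* bit 0 carries the phase tag */

struct cq_model {
	volatile struct cqe *cqes;
	uint32_t q_depth;
	uint32_t cq_head;
	uint8_t cq_phase;		/* starts at 1, flips on every wrap */
};

/* A CQE is pending when its phase bit matches the queue's current phase. */
static int cqe_pending(struct cq_model *q)
{
	return (q->cqes[q->cq_head].status & 1) == q->cq_phase;
}

/* Advance the head; on wrap, flip the phase so old entries look consumed. */
static void update_cq_head(struct cq_model *q)
{
	uint32_t tmp = q->cq_head + 1;

	if (tmp == q->q_depth) {
		q->cq_head = 0;
		q->cq_phase ^= 1;
	} else {
		q->cq_head = tmp;
	}
}

/* Drain everything currently posted; the driver then writes cq_head to the
 * CQ doorbell (nvme_ring_cq_doorbell).  Returns the number reaped. */
static int process_cq(struct cq_model *q, void (*handle)(uint32_t idx))
{
	int found = 0;

	while (cqe_pending(q)) {
		found++;
		handle(q->cq_head);
		update_cq_head(q);
	}
	return found;
}

static void handle_cqe(uint32_t idx)
{
	printf("completed CQE at index %u\n", (unsigned)idx);
}

int main(void)
{
	struct cqe ring[4] = { { 1 }, { 1 }, { 0 }, { 0 } };	/* two fresh CQEs */
	struct cq_model q = { .cqes = ring, .q_depth = 4, .cq_phase = 1 };

	printf("reaped %d, head now %u\n",
	       process_cq(&q, handle_cqe), (unsigned)q.cq_head);
	return 0;
}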
1066 struct nvme_queue *nvmeq = data; in nvme_irq() local
1074 if (nvme_process_cq(nvmeq)) in nvme_irq()
1083 struct nvme_queue *nvmeq = data; in nvme_irq_check() local
1085 if (nvme_cqe_pending(nvmeq)) in nvme_irq_check()
1094 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) in nvme_poll_irqdisable() argument
1096 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in nvme_poll_irqdisable()
1098 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); in nvme_poll_irqdisable()
1100 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1101 nvme_process_cq(nvmeq); in nvme_poll_irqdisable()
1102 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1107 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_poll() local
1110 if (!nvme_cqe_pending(nvmeq)) in nvme_poll()
1113 spin_lock(&nvmeq->cq_poll_lock); in nvme_poll()
1114 found = nvme_process_cq(nvmeq); in nvme_poll()
1115 spin_unlock(&nvmeq->cq_poll_lock); in nvme_poll()
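nvme_poll() above peeks for a pending CQE without taking any lock and only grabs cq_poll_lock when there is work to reap, so an idle poll costs one read. A self-contained model of that check-then-lock pattern, with the CQ ring reduced to posted/reaped counters and the spinlock replaced by a pthread mutex (build with -pthread); names are illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct poll_queue {
	atomic_uint posted;		/* CQEs made visible by the "device" */
	atomic_uint reaped;		/* CQEs consumed by the host */
	pthread_mutex_t cq_poll_lock;	/* spin_lock(&nvmeq->cq_poll_lock) in the driver */
};

/* Lockless peek first (like the unlocked nvme_cqe_pending() call), then take
 * the poll lock only when something is actually pending. */
static int poll_once(struct poll_queue *q)
{
	int found = 0;

	if (atomic_load(&q->posted) == atomic_load(&q->reaped))
		return 0;

	pthread_mutex_lock(&q->cq_poll_lock);
	while (atomic_load(&q->posted) != atomic_load(&q->reaped)) {
		atomic_fetch_add(&q->reaped, 1);	/* "handle" one CQE */
		found++;
	}
	pthread_mutex_unlock(&q->cq_poll_lock);

	return found;
}

int main(void)
{
	struct poll_queue q = { .cq_poll_lock = PTHREAD_MUTEX_INITIALIZER };

	atomic_store(&q.posted, 3);				/* device posts 3 CQEs */
	printf("first poll reaped %d\n", poll_once(&q));	/* 3 */
	printf("second poll reaped %d\n", poll_once(&q));	/* 0: lock never taken */
	return 0;
}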
1123 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_pci_submit_async_event() local
1129 nvme_submit_cmd(nvmeq, &c, true); in nvme_pci_submit_async_event()
1144 struct nvme_queue *nvmeq, s16 vector) in adapter_alloc_cq() argument
1149 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) in adapter_alloc_cq()
1158 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); in adapter_alloc_cq()
1160 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1168 struct nvme_queue *nvmeq) in adapter_alloc_sq() argument
1188 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); in adapter_alloc_sq()
1190 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
1210 struct nvme_queue *nvmeq = iod->nvmeq; in abort_endio() local
1212 dev_warn(nvmeq->dev->ctrl.device, in abort_endio()
1214 atomic_inc(&nvmeq->dev->ctrl.abort_limit); in abort_endio()
1264 struct nvme_queue *nvmeq = iod->nvmeq; in nvme_timeout() local
1265 struct nvme_dev *dev = nvmeq->dev; in nvme_timeout()
1290 if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_timeout()
1293 nvme_poll_irqdisable(nvmeq); in nvme_timeout()
1298 req->tag, nvmeq->qid); in nvme_timeout()
1315 req->tag, nvmeq->qid); in nvme_timeout()
1330 if (!nvmeq->qid || iod->aborted) { in nvme_timeout()
1333 req->tag, nvmeq->qid); in nvme_timeout()
1350 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); in nvme_timeout()
1352 dev_warn(nvmeq->dev->ctrl.device, in nvme_timeout()
1354 req->tag, nvmeq->qid); in nvme_timeout()
1374 static void nvme_free_queue(struct nvme_queue *nvmeq) in nvme_free_queue() argument
1376 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), in nvme_free_queue()
1377 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue()
1378 if (!nvmeq->sq_cmds) in nvme_free_queue()
1381 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { in nvme_free_queue()
1382 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), in nvme_free_queue()
1383 nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_free_queue()
1385 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), in nvme_free_queue()
1386 nvmeq->sq_cmds, nvmeq->sq_dma_addr); in nvme_free_queue()
1404 static int nvme_suspend_queue(struct nvme_queue *nvmeq) in nvme_suspend_queue() argument
1406 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) in nvme_suspend_queue()
1412 nvmeq->dev->online_queues--; in nvme_suspend_queue()
1413 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) in nvme_suspend_queue()
1414 nvme_stop_admin_queue(&nvmeq->dev->ctrl); in nvme_suspend_queue()
1415 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_suspend_queue()
1416 pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); in nvme_suspend_queue()
1430 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_disable_admin_queue() local
1437 nvme_poll_irqdisable(nvmeq); in nvme_disable_admin_queue()
1482 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, in nvme_alloc_sq_cmds() argument
1488 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1489 if (nvmeq->sq_cmds) { in nvme_alloc_sq_cmds()
1490 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, in nvme_alloc_sq_cmds()
1491 nvmeq->sq_cmds); in nvme_alloc_sq_cmds()
1492 if (nvmeq->sq_dma_addr) { in nvme_alloc_sq_cmds()
1493 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); in nvme_alloc_sq_cmds()
1497 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1501 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), in nvme_alloc_sq_cmds()
1502 &nvmeq->sq_dma_addr, GFP_KERNEL); in nvme_alloc_sq_cmds()
1503 if (!nvmeq->sq_cmds) in nvme_alloc_sq_cmds()
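nvme_alloc_sq_cmds() above first tries to place an I/O queue's SQ in the controller memory buffer through PCI peer-to-peer memory, records that with the NVMEQ_SQ_CMB flag (so nvme_free_queue() can release it the same way), and otherwise falls back to ordinary coherent DMA memory. A compilable control-flow sketch of that fallback, with the two allocators stubbed out (the p2p stub always fails here, so the fallback path runs); names and the use_cmb parameter, which stands in for dev->cmb_use_sqes, are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum { SQ_IN_CMB = 1u << 0 };		/* stands in for NVMEQ_SQ_CMB */

struct sq_alloc {
	void *sq_cmds;
	uint64_t sq_dma_addr;
	unsigned int flags;
};

/* Stub allocators: the driver uses pci_alloc_p2pmem()/pci_p2pmem_virt_to_bus()
 * and dma_alloc_coherent(); here CMB memory is simply never available. */
static void *alloc_p2pmem(size_t size, uint64_t *bus_addr)
{
	(void)size;
	*bus_addr = 0;
	return NULL;
}

static void free_p2pmem(void *p, size_t size)
{
	(void)size;
	free(p);
}

static void *alloc_coherent(size_t size, uint64_t *dma_addr)
{
	void *p = calloc(1, size);

	*dma_addr = (uint64_t)(uintptr_t)p;	/* fake "DMA address" */
	return p;
}

static int alloc_sq_cmds(struct sq_alloc *q, size_t sq_size, int qid, int use_cmb)
{
	if (qid && use_cmb) {
		q->sq_cmds = alloc_p2pmem(sq_size, &q->sq_dma_addr);
		if (q->sq_cmds) {
			if (q->sq_dma_addr) {
				q->flags |= SQ_IN_CMB;
				return 0;
			}
			/* CMB memory without a usable bus address: give it back */
			free_p2pmem(q->sq_cmds, sq_size);
		}
	}

	q->sq_cmds = alloc_coherent(sq_size, &q->sq_dma_addr);
	if (!q->sq_cmds)
		return -1;	/* -ENOMEM in the driver */
	return 0;
}

int main(void)
{
	struct sq_alloc q = { 0 };

	if (alloc_sq_cmds(&q, 64 * 1024, 1, 1) == 0)
		printf("SQ allocated, in CMB: %s\n",
		       (q.flags & SQ_IN_CMB) ? "yes" : "no");
	free(q.sq_cmds);
	return 0;
}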
1510 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_alloc_queue() local
1515 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; in nvme_alloc_queue()
1516 nvmeq->q_depth = depth; in nvme_alloc_queue()
1517 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), in nvme_alloc_queue()
1518 &nvmeq->cq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1519 if (!nvmeq->cqes) in nvme_alloc_queue()
1522 if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) in nvme_alloc_queue()
1525 nvmeq->dev = dev; in nvme_alloc_queue()
1526 spin_lock_init(&nvmeq->sq_lock); in nvme_alloc_queue()
1527 spin_lock_init(&nvmeq->cq_poll_lock); in nvme_alloc_queue()
1528 nvmeq->cq_head = 0; in nvme_alloc_queue()
1529 nvmeq->cq_phase = 1; in nvme_alloc_queue()
1530 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
1531 nvmeq->qid = qid; in nvme_alloc_queue()
1537 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, in nvme_alloc_queue()
1538 nvmeq->cq_dma_addr); in nvme_alloc_queue()
1543 static int queue_request_irq(struct nvme_queue *nvmeq) in queue_request_irq() argument
1545 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in queue_request_irq()
1546 int nr = nvmeq->dev->ctrl.instance; in queue_request_irq()
1549 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, in queue_request_irq()
1550 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1552 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, in queue_request_irq()
1553 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1557 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) in nvme_init_queue() argument
1559 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
1561 nvmeq->sq_tail = 0; in nvme_init_queue()
1562 nvmeq->last_sq_tail = 0; in nvme_init_queue()
1563 nvmeq->cq_head = 0; in nvme_init_queue()
1564 nvmeq->cq_phase = 1; in nvme_init_queue()
1565 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
1566 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); in nvme_init_queue()
1567 nvme_dbbuf_init(dev, nvmeq, qid); in nvme_init_queue()
1572 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) in nvme_create_queue() argument
1574 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
1578 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_create_queue()
1587 set_bit(NVMEQ_POLLED, &nvmeq->flags); in nvme_create_queue()
1589 result = adapter_alloc_cq(dev, qid, nvmeq, vector); in nvme_create_queue()
1593 result = adapter_alloc_sq(dev, qid, nvmeq); in nvme_create_queue()
1599 nvmeq->cq_vector = vector; in nvme_create_queue()
1600 nvme_init_queue(nvmeq, qid); in nvme_create_queue()
1603 result = queue_request_irq(nvmeq); in nvme_create_queue()
1608 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_create_queue()
1716 struct nvme_queue *nvmeq; in nvme_pci_configure_admin_queue() local
1739 nvmeq = &dev->queues[0]; in nvme_pci_configure_admin_queue()
1740 aqa = nvmeq->q_depth - 1; in nvme_pci_configure_admin_queue()
1744 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); in nvme_pci_configure_admin_queue()
1745 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); in nvme_pci_configure_admin_queue()
1751 nvmeq->cq_vector = 0; in nvme_pci_configure_admin_queue()
1752 nvme_init_queue(nvmeq, 0); in nvme_pci_configure_admin_queue()
1753 result = queue_request_irq(nvmeq); in nvme_pci_configure_admin_queue()
1759 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_pci_configure_admin_queue()
2259 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_queue_end() local
2262 complete(&nvmeq->delete_done); in nvme_del_queue_end()
2267 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_cq_end() local
2270 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_del_cq_end()
2275 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) in nvme_delete_queue() argument
2277 struct request_queue *q = nvmeq->dev->ctrl.admin_q; in nvme_delete_queue()
2283 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); in nvme_delete_queue()
2289 req->end_io_data = nvmeq; in nvme_delete_queue()
2291 init_completion(&nvmeq->delete_done); in nvme_delete_queue()
2312 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; in __nvme_disable_io_queues() local
2314 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, in __nvme_disable_io_queues()
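The teardown path above is asynchronous: nvme_delete_queue() arms nvmeq->delete_done with init_completion() before sending the delete command, the end_io callbacks (nvme_del_queue_end / nvme_del_cq_end) call complete() when the admin command finishes, and __nvme_disable_io_queues() waits on each completion with a timeout. A self-contained userspace stand-in for that init_completion/complete/wait-with-timeout pattern, built on a pthread condition variable (build with -pthread); everything here is a model, not kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Returns true if the completion fired before the timeout, false otherwise. */
static bool wait_for_completion_timeout(struct completion *c, unsigned int secs)
{
	struct timespec ts;
	bool ok = true;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;

	pthread_mutex_lock(&c->lock);
	while (!c->done && ok)
		ok = pthread_cond_timedwait(&c->cond, &c->lock, &ts) == 0;
	ok = c->done;
	pthread_mutex_unlock(&c->lock);
	return ok;
}

/* Stands in for nvme_del_queue_end() running from completion context. */
static void *end_io(void *arg)
{
	sleep(1);			/* the controller takes a moment */
	complete(arg);
	return NULL;
}

int main(void)
{
	struct completion delete_done;
	pthread_t t;

	init_completion(&delete_done);	/* init_completion(&nvmeq->delete_done) */
	pthread_create(&t, NULL, end_io, &delete_done);

	/* __nvme_disable_io_queues() waits per queue much like this */
	puts(wait_for_completion_timeout(&delete_done, 5) ?
	     "queue deleted" : "timed out");
	pthread_join(t, NULL);
	return 0;
}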