Lines Matching +full:restricted +full:-dma +full:-pool
1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2011-2014, Intel Corporation.
10 #include <linux/blk-mq.h>
11 #include <linux/blk-mq-pci.h>
12 #include <linux/blk-integrity.h>
25 #include <linux/t10-pi.h>
27 #include <linux/io-64-nonatomic-lo-hi.h>
28 #include <linux/io-64-nonatomic-hi-lo.h>
29 #include <linux/sed-opal.h>
30 #include <linux/pci-p2pdma.h>
35 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
36 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
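The two macros above size the queue rings: SQ_SIZE() shifts the depth by the per-queue SQE size exponent (6 for standard 64-byte entries, 7 for the 128-byte Apple quirk handled later in this listing), while CQ_SIZE() multiplies by the fixed completion-entry size. A minimal userspace sketch of the same arithmetic, with invented values (not part of pci.c):

#include <stdio.h>

struct fake_queue { unsigned q_depth; unsigned char sqes; };

int main(void)
{
        struct fake_queue q = { .q_depth = 1024, .sqes = 6 }; /* 64-byte SQEs */
        unsigned long sq_bytes = (unsigned long)q.q_depth << q.sqes;
        unsigned long cq_bytes = q.q_depth * 16UL; /* struct nvme_completion is 16 bytes */

        printf("SQ ring: %lu bytes, CQ ring: %lu bytes\n", sq_bytes, cq_bytes);
        return 0;
}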
85 return -EINVAL; in io_queue_count_set()
234 s8 nr_allocations; /* PRP list pool allocations. 0 means small
235 pool in use */
236 unsigned int dma_len; /* length of single DMA segment mapping */
245 return dev->nr_allocated_queues * 8 * dev->db_stride; in nvme_dbbuf_size()
252 if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)) in nvme_dbbuf_dma_alloc()
255 if (dev->dbbuf_dbs) { in nvme_dbbuf_dma_alloc()
260 memset(dev->dbbuf_dbs, 0, mem_size); in nvme_dbbuf_dma_alloc()
261 memset(dev->dbbuf_eis, 0, mem_size); in nvme_dbbuf_dma_alloc()
265 dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_alloc()
266 &dev->dbbuf_dbs_dma_addr, in nvme_dbbuf_dma_alloc()
268 if (!dev->dbbuf_dbs) in nvme_dbbuf_dma_alloc()
270 dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_alloc()
271 &dev->dbbuf_eis_dma_addr, in nvme_dbbuf_dma_alloc()
273 if (!dev->dbbuf_eis) in nvme_dbbuf_dma_alloc()
278 dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs, in nvme_dbbuf_dma_alloc()
279 dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_dma_alloc()
280 dev->dbbuf_dbs = NULL; in nvme_dbbuf_dma_alloc()
282 dev_warn(dev->dev, "unable to allocate dma for dbbuf\n"); in nvme_dbbuf_dma_alloc()
289 if (dev->dbbuf_dbs) { in nvme_dbbuf_dma_free()
290 dma_free_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_free()
291 dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_dma_free()
292 dev->dbbuf_dbs = NULL; in nvme_dbbuf_dma_free()
294 if (dev->dbbuf_eis) { in nvme_dbbuf_dma_free()
295 dma_free_coherent(dev->dev, mem_size, in nvme_dbbuf_dma_free()
296 dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); in nvme_dbbuf_dma_free()
297 dev->dbbuf_eis = NULL; in nvme_dbbuf_dma_free()
304 if (!dev->dbbuf_dbs || !qid) in nvme_dbbuf_init()
307 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
308 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
309 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
310 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; in nvme_dbbuf_init()
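The sq_idx()/cq_idx() helpers used above (defined earlier in pci.c) interleave submission and completion doorbells per queue, spaced by the controller's doorbell stride; the shadow dbbuf arrays mirror that layout. Illustrative sketch:

#include <stdio.h>

/* Same layout the dbbuf pointers above index into: SQ0,CQ0,SQ1,CQ1,...
 * with db_stride slots between consecutive doorbell positions. */
static unsigned sq_idx(unsigned qid, unsigned stride) { return qid * 2 * stride; }
static unsigned cq_idx(unsigned qid, unsigned stride) { return (qid * 2 + 1) * stride; }

int main(void)
{
        for (unsigned qid = 0; qid < 3; qid++)
                printf("qid %u: sq slot %u, cq slot %u\n",
                       qid, sq_idx(qid, 1), cq_idx(qid, 1));
        return 0;
}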
315 if (!nvmeq->qid) in nvme_dbbuf_free()
318 nvmeq->dbbuf_sq_db = NULL; in nvme_dbbuf_free()
319 nvmeq->dbbuf_cq_db = NULL; in nvme_dbbuf_free()
320 nvmeq->dbbuf_sq_ei = NULL; in nvme_dbbuf_free()
321 nvmeq->dbbuf_cq_ei = NULL; in nvme_dbbuf_free()
329 if (!dev->dbbuf_dbs) in nvme_dbbuf_set()
333 c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); in nvme_dbbuf_set()
334 c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); in nvme_dbbuf_set()
336 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { in nvme_dbbuf_set()
337 dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); in nvme_dbbuf_set()
341 for (i = 1; i <= dev->online_queues; i++) in nvme_dbbuf_set()
342 nvme_dbbuf_free(&dev->queues[i]); in nvme_dbbuf_set()
348 return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); in nvme_dbbuf_need_event()
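This one-liner mirrors virtio's vring_need_event(): ring a real doorbell only if the shadow event index falls in the half-open interval (old, new_idx] under 16-bit wraparound arithmetic. A self-contained sketch with sample values:

#include <stdio.h>
#include <stdint.h>

/* Wraparound-safe membership test: event_idx in (old, new_idx] mod 2^16. */
static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
        /* tail moved 5 -> 8; an event index of 6 is inside (5, 8], so ring */
        printf("%d\n", need_event(6, 8, 5));     /* 1 */
        /* event index 9 is past the new tail: no doorbell needed yet */
        printf("%d\n", need_event(9, 8, 5));     /* 0 */
        /* still correct across the 16-bit wrap: 65534 -> 2 passes event 0 */
        printf("%d\n", need_event(0, 2, 65534)); /* 1 */
        return 0;
}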
392 return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); in nvme_pci_npages_prp()
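The divisor here is NVME_CTRL_PAGE_SIZE - 8 because the final 8-byte entry of each PRP-list page is reserved to chain to the next list page, leaving 511 usable entries per 4 KiB page. Worked example for a hypothetical 1 MiB transfer:

#include <stdio.h>

#define CTRL_PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned nprps = DIV_ROUND_UP(1024u * 1024u, CTRL_PAGE_SIZE); /* 256 */
        unsigned pages = DIV_ROUND_UP(8 * nprps, CTRL_PAGE_SIZE - 8); /* 1 */

        printf("%u PRP entries need %u list page(s)\n", nprps, pages);
        return 0;
}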
399 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_admin_init_hctx()
402 WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); in nvme_admin_init_hctx()
404 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
412 struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; in nvme_init_hctx()
414 WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); in nvme_init_hctx()
415 hctx->driver_data = nvmeq; in nvme_init_hctx()
425 nvme_req(req)->ctrl = set->driver_data; in nvme_pci_init_request()
426 nvme_req(req)->cmd = &iod->cmd; in nvme_pci_init_request()
433 if (dev->num_vecs > 1) in queue_irq_offset()
441 struct nvme_dev *dev = to_nvme_dev(set->driver_data); in nvme_pci_map_queues()
445 for (i = 0, qoff = 0; i < set->nr_maps; i++) { in nvme_pci_map_queues()
446 struct blk_mq_queue_map *map = &set->map[i]; in nvme_pci_map_queues()
448 map->nr_queues = dev->io_queues[i]; in nvme_pci_map_queues()
449 if (!map->nr_queues) { in nvme_pci_map_queues()
456 * affinity), so use the regular blk-mq cpu mapping in nvme_pci_map_queues()
458 map->queue_offset = qoff; in nvme_pci_map_queues()
460 blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); in nvme_pci_map_queues()
463 qoff += map->nr_queues; in nvme_pci_map_queues()
464 offset += map->nr_queues; in nvme_pci_map_queues()
474 u16 next_tail = nvmeq->sq_tail + 1; in nvme_write_sq_db()
476 if (next_tail == nvmeq->q_depth) in nvme_write_sq_db()
478 if (next_tail != nvmeq->last_sq_tail) in nvme_write_sq_db()
482 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, in nvme_write_sq_db()
483 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) in nvme_write_sq_db()
484 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_write_sq_db()
485 nvmeq->last_sq_tail = nvmeq->sq_tail; in nvme_write_sq_db()
491 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), in nvme_sq_copy_cmd()
493 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_sq_copy_cmd()
494 nvmeq->sq_tail = 0; in nvme_sq_copy_cmd()
499 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_commit_rqs()
501 spin_lock(&nvmeq->sq_lock); in nvme_commit_rqs()
502 if (nvmeq->sq_tail != nvmeq->last_sq_tail) in nvme_commit_rqs()
504 spin_unlock(&nvmeq->sq_lock); in nvme_commit_rqs()
510 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_use_sgls()
515 if (!nvme_ctrl_sgl_supported(&dev->ctrl)) in nvme_pci_use_sgls()
517 if (!nvmeq->qid) in nvme_pci_use_sgls()
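nvme_pci_use_sgls() never picks SGLs on the admin queue or on controllers without SGL support; the part of its body elided by this search compares the request's average segment size against the sgl_threshold module parameter. A hedged sketch of that heuristic, assuming the usual 32 KiB default:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Sketch only: the real function also rejects admin-queue requests and
 * controllers lacking SGL support, as the matched lines above show. */
static int use_sgls(unsigned payload_bytes, unsigned nseg, unsigned sgl_threshold)
{
        unsigned avg_seg_size = DIV_ROUND_UP(payload_bytes, nseg);

        if (!sgl_threshold || avg_seg_size < sgl_threshold)
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", use_sgls(128 * 1024, 2, 32768));  /* large segments: SGL */
        printf("%d\n", use_sgls(128 * 1024, 32, 32768)); /* 4k segments: PRP */
        return 0;
}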
526 const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; in nvme_free_prps()
528 dma_addr_t dma_addr = iod->first_dma; in nvme_free_prps()
531 for (i = 0; i < iod->nr_allocations; i++) { in nvme_free_prps()
532 __le64 *prp_list = iod->list[i].prp_list; in nvme_free_prps()
535 dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); in nvme_free_prps()
544 if (iod->dma_len) { in nvme_unmap_data()
545 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, in nvme_unmap_data()
550 WARN_ON_ONCE(!iod->sgt.nents); in nvme_unmap_data()
552 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); in nvme_unmap_data()
554 if (iod->nr_allocations == 0) in nvme_unmap_data()
555 dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, in nvme_unmap_data()
556 iod->first_dma); in nvme_unmap_data()
557 else if (iod->nr_allocations == 1) in nvme_unmap_data()
558 dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, in nvme_unmap_data()
559 iod->first_dma); in nvme_unmap_data()
562 mempool_free(iod->sgt.sgl, dev->iod_mempool); in nvme_unmap_data()
574 i, &phys, sg->offset, sg->length, &sg_dma_address(sg), in nvme_print_sgl()
583 struct dma_pool *pool; in nvme_pci_setup_prps() local
585 struct scatterlist *sg = iod->sgt.sgl; in nvme_pci_setup_prps()
588 int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); in nvme_pci_setup_prps()
593 length -= (NVME_CTRL_PAGE_SIZE - offset); in nvme_pci_setup_prps()
595 iod->first_dma = 0; in nvme_pci_setup_prps()
599 dma_len -= (NVME_CTRL_PAGE_SIZE - offset); in nvme_pci_setup_prps()
601 dma_addr += (NVME_CTRL_PAGE_SIZE - offset); in nvme_pci_setup_prps()
609 iod->first_dma = dma_addr; in nvme_pci_setup_prps()
615 pool = dev->prp_small_pool; in nvme_pci_setup_prps()
616 iod->nr_allocations = 0; in nvme_pci_setup_prps()
618 pool = dev->prp_page_pool; in nvme_pci_setup_prps()
619 iod->nr_allocations = 1; in nvme_pci_setup_prps()
622 prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); in nvme_pci_setup_prps()
624 iod->nr_allocations = -1; in nvme_pci_setup_prps()
627 iod->list[0].prp_list = prp_list; in nvme_pci_setup_prps()
628 iod->first_dma = prp_dma; in nvme_pci_setup_prps()
633 prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); in nvme_pci_setup_prps()
636 iod->list[iod->nr_allocations++].prp_list = prp_list; in nvme_pci_setup_prps()
637 prp_list[0] = old_prp_list[i - 1]; in nvme_pci_setup_prps()
638 old_prp_list[i - 1] = cpu_to_le64(prp_dma); in nvme_pci_setup_prps()
642 dma_len -= NVME_CTRL_PAGE_SIZE; in nvme_pci_setup_prps()
644 length -= NVME_CTRL_PAGE_SIZE; in nvme_pci_setup_prps()
656 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); in nvme_pci_setup_prps()
657 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); in nvme_pci_setup_prps()
663 WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), in nvme_pci_setup_prps()
665 blk_rq_payload_bytes(req), iod->sgt.nents); in nvme_pci_setup_prps()
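Stripped of the scatterlist walk, the PRP scheme implemented above resolves to three cases: the transfer fits behind prp1 alone, prp2 names a second data page, or prp2 points at a chained PRP list. Illustrative classification (addresses and lengths invented):

#include <stdio.h>
#include <stdint.h>

#define CTRL_PAGE_SIZE 4096u

static const char *prp_mode(uint64_t dma_addr, unsigned length)
{
        unsigned offset = dma_addr & (CTRL_PAGE_SIZE - 1);

        if (length <= CTRL_PAGE_SIZE - offset)
                return "prp1 only (prp2 = 0)";
        if (length <= 2 * CTRL_PAGE_SIZE - offset)
                return "prp1 + prp2 as second page";
        return "prp1 + prp2 pointing at a PRP list";
}

int main(void)
{
        printf("%s\n", prp_mode(0x1000, 512));       /* fits in one page */
        printf("%s\n", prp_mode(0x1200, 6000));      /* exactly two pages */
        printf("%s\n", prp_mode(0x1000, 64 * 1024)); /* needs a list */
        return 0;
}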
672 sge->addr = cpu_to_le64(sg_dma_address(sg)); in nvme_pci_sgl_set_data()
673 sge->length = cpu_to_le32(sg_dma_len(sg)); in nvme_pci_sgl_set_data()
674 sge->type = NVME_SGL_FMT_DATA_DESC << 4; in nvme_pci_sgl_set_data()
680 sge->addr = cpu_to_le64(dma_addr); in nvme_pci_sgl_set_seg()
681 sge->length = cpu_to_le32(entries * sizeof(*sge)); in nvme_pci_sgl_set_seg()
682 sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; in nvme_pci_sgl_set_seg()
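Both helpers pack a 16-byte NVMe SGL descriptor: an 8-byte address, a 4-byte length, and a type byte whose high nibble carries the descriptor type (0x0 data, 0x3 last segment), hence the << 4. A layout sketch (little-endian host assumed; not driver code):

#include <stdio.h>
#include <stdint.h>

struct sgl_desc {                /* 16 bytes, matching the NVMe spec layout */
        uint64_t addr;
        uint32_t length;
        uint8_t  rsvd[3];
        uint8_t  type;           /* descriptor type in the high nibble */
};

int main(void)
{
        struct sgl_desc data = { .addr = 0x1000, .length = 4096,   .type = 0x0 << 4 };
        struct sgl_desc seg  = { .addr = 0x2000, .length = 2 * 16, .type = 0x3 << 4 };

        printf("sizeof = %zu, data type = 0x%02x, last-segment type = 0x%02x\n",
               sizeof(data), data.type, seg.type);
        return 0;
}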
689 struct dma_pool *pool; in nvme_pci_setup_sgls() local
691 struct scatterlist *sg = iod->sgt.sgl; in nvme_pci_setup_sgls()
692 unsigned int entries = iod->sgt.nents; in nvme_pci_setup_sgls()
697 cmd->flags = NVME_CMD_SGL_METABUF; in nvme_pci_setup_sgls()
700 nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); in nvme_pci_setup_sgls()
705 pool = dev->prp_small_pool; in nvme_pci_setup_sgls()
706 iod->nr_allocations = 0; in nvme_pci_setup_sgls()
708 pool = dev->prp_page_pool; in nvme_pci_setup_sgls()
709 iod->nr_allocations = 1; in nvme_pci_setup_sgls()
712 sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); in nvme_pci_setup_sgls()
714 iod->nr_allocations = -1; in nvme_pci_setup_sgls()
718 iod->list[0].sg_list = sg_list; in nvme_pci_setup_sgls()
719 iod->first_dma = sgl_dma; in nvme_pci_setup_sgls()
721 nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); in nvme_pci_setup_sgls()
725 } while (--entries > 0); in nvme_pci_setup_sgls()
735 unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); in nvme_setup_prp_simple()
736 unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; in nvme_setup_prp_simple()
738 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_prp_simple()
739 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_prp_simple()
741 iod->dma_len = bv->bv_len; in nvme_setup_prp_simple()
743 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); in nvme_setup_prp_simple()
744 if (bv->bv_len > first_prp_len) in nvme_setup_prp_simple()
745 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); in nvme_setup_prp_simple()
747 cmnd->dptr.prp2 = 0; in nvme_setup_prp_simple()
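This is the single-bio_vec fast path: prp1 takes the (possibly page-offset) start address and prp2 is filled only when the vector crosses into a second controller page. Worked example with invented addresses:

#include <stdio.h>
#include <stdint.h>

#define CTRL_PAGE_SIZE 4096u

int main(void)
{
        uint64_t first_dma = 0x10200;                 /* offset 0x200 in page */
        unsigned bv_len = 5000;
        unsigned offset = first_dma & (CTRL_PAGE_SIZE - 1);
        unsigned first_prp_len = CTRL_PAGE_SIZE - offset;

        printf("prp1 = %#llx\n", (unsigned long long)first_dma);
        if (bv_len > first_prp_len)
                printf("prp2 = %#llx\n",
                       (unsigned long long)(first_dma + first_prp_len));
        else
                printf("prp2 = 0\n");
        return 0;
}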
757 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_sgl_simple()
758 if (dma_mapping_error(dev->dev, iod->first_dma)) in nvme_setup_sgl_simple()
760 iod->dma_len = bv->bv_len; in nvme_setup_sgl_simple()
762 cmnd->flags = NVME_CMD_SGL_METABUF; in nvme_setup_sgl_simple()
763 cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); in nvme_setup_sgl_simple()
764 cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); in nvme_setup_sgl_simple()
765 cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; in nvme_setup_sgl_simple()
777 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_map_data()
783 &cmnd->rw, &bv); in nvme_map_data()
785 if (nvmeq->qid && sgl_threshold && in nvme_map_data()
786 nvme_ctrl_sgl_supported(&dev->ctrl)) in nvme_map_data()
788 &cmnd->rw, &bv); in nvme_map_data()
792 iod->dma_len = 0; in nvme_map_data()
793 iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); in nvme_map_data()
794 if (!iod->sgt.sgl) in nvme_map_data()
796 sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); in nvme_map_data()
797 iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); in nvme_map_data()
798 if (!iod->sgt.orig_nents) in nvme_map_data()
801 rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), in nvme_map_data()
804 if (rc == -EREMOTEIO) in nvme_map_data()
809 if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) in nvme_map_data()
810 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); in nvme_map_data()
812 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); in nvme_map_data()
818 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); in nvme_map_data()
820 mempool_free(iod->sgt.sgl, dev->iod_mempool); in nvme_map_data()
829 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), in nvme_map_metadata()
831 if (dma_mapping_error(dev->dev, iod->meta_dma)) in nvme_map_metadata()
833 cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); in nvme_map_metadata()
842 iod->aborted = false; in nvme_prep_rq()
843 iod->nr_allocations = -1; in nvme_prep_rq()
844 iod->sgt.nents = 0; in nvme_prep_rq()
846 ret = nvme_setup_cmd(req->q->queuedata, req); in nvme_prep_rq()
851 ret = nvme_map_data(dev, req, &iod->cmd); in nvme_prep_rq()
857 ret = nvme_map_metadata(dev, req, &iod->cmd); in nvme_prep_rq()
877 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_queue_rq()
878 struct nvme_dev *dev = nvmeq->dev; in nvme_queue_rq()
879 struct request *req = bd->rq; in nvme_queue_rq()
887 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) in nvme_queue_rq()
890 if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) in nvme_queue_rq()
891 return nvme_fail_nonready_command(&dev->ctrl, req); in nvme_queue_rq()
896 spin_lock(&nvmeq->sq_lock); in nvme_queue_rq()
897 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_queue_rq()
898 nvme_write_sq_db(nvmeq, bd->last); in nvme_queue_rq()
899 spin_unlock(&nvmeq->sq_lock); in nvme_queue_rq()
905 spin_lock(&nvmeq->sq_lock); in nvme_submit_cmds()
910 nvme_sq_copy_cmd(nvmeq, &iod->cmd); in nvme_submit_cmds()
913 spin_unlock(&nvmeq->sq_lock); in nvme_submit_cmds()
922 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) in nvme_prep_rq_batch()
924 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) in nvme_prep_rq_batch()
927 req->mq_hctx->tags->rqs[req->tag] = req; in nvme_prep_rq_batch()
928 return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; in nvme_prep_rq_batch()
937 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_queue_rqs()
948 if (!next || req->mq_hctx != next->mq_hctx) { in nvme_queue_rqs()
950 req->rq_next = NULL; in nvme_queue_rqs()
963 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_pci_unmap_rq()
964 struct nvme_dev *dev = nvmeq->dev; in nvme_pci_unmap_rq()
969 dma_unmap_page(dev->dev, iod->meta_dma, in nvme_pci_unmap_rq()
970 rq_integrity_vec(req)->bv_len, rq_dma_dir(req)); in nvme_pci_unmap_rq()
991 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; in nvme_cqe_pending()
993 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; in nvme_cqe_pending()
998 u16 head = nvmeq->cq_head; in nvme_ring_cq_doorbell()
1000 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, in nvme_ring_cq_doorbell()
1001 nvmeq->dbbuf_cq_ei)) in nvme_ring_cq_doorbell()
1002 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_ring_cq_doorbell()
1007 if (!nvmeq->qid) in nvme_queue_tagset()
1008 return nvmeq->dev->admin_tagset.tags[0]; in nvme_queue_tagset()
1009 return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; in nvme_queue_tagset()
1015 struct nvme_completion *cqe = &nvmeq->cqes[idx]; in nvme_handle_cqe()
1016 __u16 command_id = READ_ONCE(cqe->command_id); in nvme_handle_cqe()
1025 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { in nvme_handle_cqe()
1026 nvme_complete_async_event(&nvmeq->dev->ctrl, in nvme_handle_cqe()
1027 cqe->status, &cqe->result); in nvme_handle_cqe()
1033 dev_warn(nvmeq->dev->ctrl.device, in nvme_handle_cqe()
1035 command_id, le16_to_cpu(cqe->sq_id)); in nvme_handle_cqe()
1039 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); in nvme_handle_cqe()
1040 if (!nvme_try_complete_req(req, cqe->status, cqe->result) && in nvme_handle_cqe()
1041 !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, in nvme_handle_cqe()
1048 u32 tmp = nvmeq->cq_head + 1; in nvme_update_cq_head()
1050 if (tmp == nvmeq->q_depth) { in nvme_update_cq_head()
1051 nvmeq->cq_head = 0; in nvme_update_cq_head()
1052 nvmeq->cq_phase ^= 1; in nvme_update_cq_head()
1054 nvmeq->cq_head = tmp; in nvme_update_cq_head()
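nvme_cqe_pending() and nvme_update_cq_head() together implement the NVMe phase-tag protocol: bit 0 of each CQE's status toggles every lap of the ring, so an entry is new only while its phase bit matches the consumer's expected phase. A compact consumer-loop sketch:

#include <stdio.h>
#include <stdint.h>

struct cq {
        uint16_t *status;  /* status field of each fake CQE */
        unsigned depth, head, phase;
};

static int cqe_pending(struct cq *q)
{
        return (q->status[q->head] & 1) == q->phase;
}

static void update_head(struct cq *q)
{
        if (++q->head == q->depth) {
                q->head = 0;
                q->phase ^= 1;  /* expect the opposite tag on the next lap */
        }
}

int main(void)
{
        uint16_t status[4] = { 1, 1, 0, 0 }; /* controller posted two CQEs */
        struct cq q = { status, 4, 0, 1 };

        while (cqe_pending(&q)) {
                printf("consume CQE at head %u\n", q.head);
                update_head(&q);
        }
        return 0;
}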
1066 * load-load control dependency between phase and the rest of in nvme_poll_cq()
1070 nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); in nvme_poll_cq()
1107 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in nvme_poll_irqdisable()
1109 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); in nvme_poll_irqdisable()
1111 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1113 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); in nvme_poll_irqdisable()
1118 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_poll()
1124 spin_lock(&nvmeq->cq_poll_lock); in nvme_poll()
1126 spin_unlock(&nvmeq->cq_poll_lock); in nvme_poll()
1134 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_pci_submit_async_event()
1140 spin_lock(&nvmeq->sq_lock); in nvme_pci_submit_async_event()
1143 spin_unlock(&nvmeq->sq_lock); in nvme_pci_submit_async_event()
1153 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_delete_queue()
1162 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) in adapter_alloc_cq()
1170 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); in adapter_alloc_cq()
1172 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1176 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_cq()
1182 struct nvme_ctrl *ctrl = &dev->ctrl; in adapter_alloc_sq()
1187 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't in adapter_alloc_sq()
1191 if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) in adapter_alloc_sq()
1199 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); in adapter_alloc_sq()
1201 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
1205 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_sq()
1220 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in abort_endio()
1222 dev_warn(nvmeq->dev->ctrl.device, in abort_endio()
1223 "Abort status: 0x%x", nvme_req(req)->status); in abort_endio()
1224 atomic_inc(&nvmeq->dev->ctrl.abort_limit); in abort_endio()
1234 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); in nvme_should_reset()
1237 switch (nvme_ctrl_state(&dev->ctrl)) { in nvme_should_reset()
1260 result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, in nvme_warn_reset()
1263 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1267 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1274 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1276 dev_warn(dev->ctrl.device, in nvme_warn_reset()
1283 struct nvme_queue *nvmeq = req->mq_hctx->driver_data; in nvme_timeout()
1284 struct nvme_dev *dev = nvmeq->dev; in nvme_timeout()
1287 u32 csts = readl(dev->bar + NVME_REG_CSTS); in nvme_timeout()
1293 if (pci_channel_offline(to_pci_dev(dev->dev))) in nvme_timeout()
1307 if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_timeout()
1308 nvme_poll(req->mq_hctx, NULL); in nvme_timeout()
1313 dev_warn(dev->ctrl.device, in nvme_timeout()
1315 req->tag, nvmeq->qid); in nvme_timeout()
1325 switch (nvme_ctrl_state(&dev->ctrl)) { in nvme_timeout()
1327 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_timeout()
1330 dev_warn_ratelimited(dev->ctrl.device, in nvme_timeout()
1332 req->tag, nvmeq->qid); in nvme_timeout()
1333 nvme_req(req)->flags |= NVME_REQ_CANCELLED; in nvme_timeout()
1347 if (!nvmeq->qid || iod->aborted) { in nvme_timeout()
1348 dev_warn(dev->ctrl.device, in nvme_timeout()
1350 req->tag, nvmeq->qid); in nvme_timeout()
1351 nvme_req(req)->flags |= NVME_REQ_CANCELLED; in nvme_timeout()
1355 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { in nvme_timeout()
1356 atomic_inc(&dev->ctrl.abort_limit); in nvme_timeout()
1359 iod->aborted = true; in nvme_timeout()
1363 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); in nvme_timeout()
1365 dev_warn(nvmeq->dev->ctrl.device, in nvme_timeout()
1367 req->tag, in nvme_timeout()
1368 nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode), in nvme_timeout()
1369 nvmeq->qid); in nvme_timeout()
1371 abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), in nvme_timeout()
1374 atomic_inc(&dev->ctrl.abort_limit); in nvme_timeout()
1379 abort_req->end_io = abort_endio; in nvme_timeout()
1380 abort_req->end_io_data = NULL; in nvme_timeout()
1391 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) in nvme_timeout()
1395 if (nvme_try_sched_reset(&dev->ctrl)) in nvme_timeout()
1396 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_timeout()
1402 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), in nvme_free_queue()
1403 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue()
1404 if (!nvmeq->sq_cmds) in nvme_free_queue()
1407 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { in nvme_free_queue()
1408 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), in nvme_free_queue()
1409 nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_free_queue()
1411 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), in nvme_free_queue()
1412 nvmeq->sq_cmds, nvmeq->sq_dma_addr); in nvme_free_queue()
1420 for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { in nvme_free_queues()
1421 dev->ctrl.queue_count--; in nvme_free_queues()
1422 nvme_free_queue(&dev->queues[i]); in nvme_free_queues()
1428 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_suspend_queue()
1430 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) in nvme_suspend_queue()
1436 nvmeq->dev->online_queues--; in nvme_suspend_queue()
1437 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) in nvme_suspend_queue()
1438 nvme_quiesce_admin_queue(&nvmeq->dev->ctrl); in nvme_suspend_queue()
1439 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) in nvme_suspend_queue()
1440 pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq); in nvme_suspend_queue()
1447 for (i = dev->ctrl.queue_count - 1; i > 0; i--) in nvme_suspend_io_queues()
1461 for (i = dev->ctrl.queue_count - 1; i > 0; i--) { in nvme_reap_pending_cqes()
1462 spin_lock(&dev->queues[i].cq_poll_lock); in nvme_reap_pending_cqes()
1463 nvme_poll_cq(&dev->queues[i], NULL); in nvme_reap_pending_cqes()
1464 spin_unlock(&dev->queues[i].cq_poll_lock); in nvme_reap_pending_cqes()
1471 int q_depth = dev->q_depth; in nvme_cmb_qdepth()
1475 if (q_size_aligned * nr_io_queues > dev->cmb_size) { in nvme_cmb_qdepth()
1476 u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); in nvme_cmb_qdepth()
1487 return -ENOMEM; in nvme_cmb_qdepth()
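nvme_cmb_qdepth() shrinks the per-queue depth when the preferred queues will not all fit in the controller memory buffer, splitting the CMB evenly per queue and giving up (the -ENOMEM above) once the depth gets too small. Sketch with invented sizes:

#include <stdio.h>
#include <stdint.h>

#define PAGE 4096u
#define ROUND_UP(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define ROUND_DOWN(x, a) (((x) / (a)) * (a))

int main(void)
{
        unsigned q_depth = 1024, entry_size = 64, nr_io_queues = 8;
        uint64_t cmb_size = 256u << 10; /* hypothetical 256 KiB CMB */
        uint64_t q_bytes = ROUND_UP((uint64_t)q_depth * entry_size, PAGE);

        if (q_bytes * nr_io_queues > cmb_size) {
                uint64_t mem_per_q = ROUND_DOWN(cmb_size / nr_io_queues, PAGE);
                q_depth = mem_per_q / entry_size;
        }
        printf("q_depth = %u\n", q_depth); /* shrunk from 1024 to 512 */
        return 0;
}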
1496 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_alloc_sq_cmds()
1498 if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { in nvme_alloc_sq_cmds()
1499 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1500 if (nvmeq->sq_cmds) { in nvme_alloc_sq_cmds()
1501 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, in nvme_alloc_sq_cmds()
1502 nvmeq->sq_cmds); in nvme_alloc_sq_cmds()
1503 if (nvmeq->sq_dma_addr) { in nvme_alloc_sq_cmds()
1504 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); in nvme_alloc_sq_cmds()
1508 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); in nvme_alloc_sq_cmds()
1512 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), in nvme_alloc_sq_cmds()
1513 &nvmeq->sq_dma_addr, GFP_KERNEL); in nvme_alloc_sq_cmds()
1514 if (!nvmeq->sq_cmds) in nvme_alloc_sq_cmds()
1515 return -ENOMEM; in nvme_alloc_sq_cmds()
1521 struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_alloc_queue()
1523 if (dev->ctrl.queue_count > qid) in nvme_alloc_queue()
1526 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; in nvme_alloc_queue()
1527 nvmeq->q_depth = depth; in nvme_alloc_queue()
1528 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), in nvme_alloc_queue()
1529 &nvmeq->cq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1530 if (!nvmeq->cqes) in nvme_alloc_queue()
1536 nvmeq->dev = dev; in nvme_alloc_queue()
1537 spin_lock_init(&nvmeq->sq_lock); in nvme_alloc_queue()
1538 spin_lock_init(&nvmeq->cq_poll_lock); in nvme_alloc_queue()
1539 nvmeq->cq_head = 0; in nvme_alloc_queue()
1540 nvmeq->cq_phase = 1; in nvme_alloc_queue()
1541 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
1542 nvmeq->qid = qid; in nvme_alloc_queue()
1543 dev->ctrl.queue_count++; in nvme_alloc_queue()
1548 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, in nvme_alloc_queue()
1549 nvmeq->cq_dma_addr); in nvme_alloc_queue()
1551 return -ENOMEM; in nvme_alloc_queue()
1556 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); in queue_request_irq()
1557 int nr = nvmeq->dev->ctrl.instance; in queue_request_irq()
1560 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, in queue_request_irq()
1561 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1563 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, in queue_request_irq()
1564 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); in queue_request_irq()
1570 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
1572 nvmeq->sq_tail = 0; in nvme_init_queue()
1573 nvmeq->last_sq_tail = 0; in nvme_init_queue()
1574 nvmeq->cq_head = 0; in nvme_init_queue()
1575 nvmeq->cq_phase = 1; in nvme_init_queue()
1576 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
1577 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); in nvme_init_queue()
1579 dev->online_queues++; in nvme_init_queue()
1591 if (!mutex_trylock(&dev->shutdown_lock)) in nvme_setup_io_queues_trylock()
1592 return -ENODEV; in nvme_setup_io_queues_trylock()
1597 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) { in nvme_setup_io_queues_trylock()
1598 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues_trylock()
1599 return -ENODEV; in nvme_setup_io_queues_trylock()
1607 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
1611 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_create_queue()
1618 vector = dev->num_vecs == 1 ? 0 : qid; in nvme_create_queue()
1620 set_bit(NVMEQ_POLLED, &nvmeq->flags); in nvme_create_queue()
1632 nvmeq->cq_vector = vector; in nvme_create_queue()
1644 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_create_queue()
1645 mutex_unlock(&dev->shutdown_lock); in nvme_create_queue()
1649 dev->online_queues--; in nvme_create_queue()
1650 mutex_unlock(&dev->shutdown_lock); in nvme_create_queue()
1679 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { in nvme_dev_remove_admin()
1685 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_dev_remove_admin()
1686 nvme_remove_admin_tag_set(&dev->ctrl); in nvme_dev_remove_admin()
1692 return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); in db_bar_size()
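db_bar_size() computes how much of BAR0 must stay mapped: the doorbell region begins at offset 0x1000 (NVME_REG_DBS) and carries one SQ plus one CQ doorbell, 4 bytes each, per queue including the admin queue, scaled by the doorbell stride. Standalone sketch:

#include <stdio.h>

#define NVME_REG_DBS 0x1000u

static unsigned db_bar_size(unsigned nr_io_queues, unsigned db_stride)
{
        return NVME_REG_DBS + (nr_io_queues + 1) * 8 * db_stride;
}

int main(void)
{
        printf("64 io queues, stride 1: map %u bytes\n", db_bar_size(64, 1));
        return 0;
}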
1697 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_remap_bar()
1699 if (size <= dev->bar_mapped_size) in nvme_remap_bar()
1702 return -ENOMEM; in nvme_remap_bar()
1703 if (dev->bar) in nvme_remap_bar()
1704 iounmap(dev->bar); in nvme_remap_bar()
1705 dev->bar = ioremap(pci_resource_start(pdev, 0), size); in nvme_remap_bar()
1706 if (!dev->bar) { in nvme_remap_bar()
1707 dev->bar_mapped_size = 0; in nvme_remap_bar()
1708 return -ENOMEM; in nvme_remap_bar()
1710 dev->bar_mapped_size = size; in nvme_remap_bar()
1711 dev->dbs = dev->bar + NVME_REG_DBS; in nvme_remap_bar()
1726 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? in nvme_pci_configure_admin_queue()
1727 NVME_CAP_NSSRC(dev->ctrl.cap) : 0; in nvme_pci_configure_admin_queue()
1729 if (dev->subsystem && in nvme_pci_configure_admin_queue()
1730 (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) in nvme_pci_configure_admin_queue()
1731 writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); in nvme_pci_configure_admin_queue()
1740 result = nvme_disable_ctrl(&dev->ctrl, false); in nvme_pci_configure_admin_queue()
1748 dev->ctrl.numa_node = dev_to_node(dev->dev); in nvme_pci_configure_admin_queue()
1750 nvmeq = &dev->queues[0]; in nvme_pci_configure_admin_queue()
1751 aqa = nvmeq->q_depth - 1; in nvme_pci_configure_admin_queue()
1754 writel(aqa, dev->bar + NVME_REG_AQA); in nvme_pci_configure_admin_queue()
1755 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); in nvme_pci_configure_admin_queue()
1756 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); in nvme_pci_configure_admin_queue()
1758 result = nvme_enable_ctrl(&dev->ctrl); in nvme_pci_configure_admin_queue()
1762 nvmeq->cq_vector = 0; in nvme_pci_configure_admin_queue()
1766 dev->online_queues--; in nvme_pci_configure_admin_queue()
1770 set_bit(NVMEQ_ENABLED, &nvmeq->flags); in nvme_pci_configure_admin_queue()
1779 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { in nvme_create_io_queues()
1780 if (nvme_alloc_queue(dev, i, dev->q_depth)) { in nvme_create_io_queues()
1781 ret = -ENOMEM; in nvme_create_io_queues()
1786 max = min(dev->max_qid, dev->ctrl.queue_count - 1); in nvme_create_io_queues()
1787 if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { in nvme_create_io_queues()
1788 rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + in nvme_create_io_queues()
1789 dev->io_queues[HCTX_TYPE_READ]; in nvme_create_io_queues()
1794 for (i = dev->online_queues; i <= max; i++) { in nvme_create_io_queues()
1797 ret = nvme_create_queue(&dev->queues[i], i, polled); in nvme_create_io_queues()
1813 u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; in nvme_cmb_size_unit()
1820 return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; in nvme_cmb_size()
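These two helpers decode the CMBSZ register: SZU (bits 11:8) selects a unit that starts at 4 KiB and grows by powers of 16, and SZ (bits 31:12) is the multiplier. Standalone decode example with an invented register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t cmbsz = (2u << 12) | (1u << 8); /* SZ = 2 units, SZU = 1 (64 KiB) */
        uint64_t unit = 1ULL << (12 + 4 * ((cmbsz >> 8) & 0xf));
        uint64_t size = unit * ((cmbsz >> 12) & 0xfffff);

        printf("CMB size = %llu KiB\n", (unsigned long long)(size >> 10)); /* 128 */
        return 0;
}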
1827 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_map_cmb()
1830 if (dev->cmb_size) in nvme_map_cmb()
1833 if (NVME_CAP_CMBS(dev->ctrl.cap)) in nvme_map_cmb()
1834 writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); in nvme_map_cmb()
1836 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); in nvme_map_cmb()
1837 if (!dev->cmbsz) in nvme_map_cmb()
1839 dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); in nvme_map_cmb()
1842 offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); in nvme_map_cmb()
1843 bar = NVME_CMB_BIR(dev->cmbloc); in nvme_map_cmb()
1853 if (NVME_CAP_CMBS(dev->ctrl.cap)) { in nvme_map_cmb()
1856 dev->bar + NVME_REG_CMBMSC); in nvme_map_cmb()
1864 if (size > bar_size - offset) in nvme_map_cmb()
1865 size = bar_size - offset; in nvme_map_cmb()
1868 dev_warn(dev->ctrl.device, in nvme_map_cmb()
1873 dev->cmb_size = size; in nvme_map_cmb()
1874 dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); in nvme_map_cmb()
1876 if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == in nvme_map_cmb()
1885 u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; in nvme_set_host_mem()
1886 u64 dma_addr = dev->host_mem_descs_dma; in nvme_set_host_mem()
1896 c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); in nvme_set_host_mem()
1898 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in nvme_set_host_mem()
1900 dev_warn(dev->ctrl.device, in nvme_set_host_mem()
1904 dev->hmb = bits & NVME_HOST_MEM_ENABLE; in nvme_set_host_mem()
1913 for (i = 0; i < dev->nr_host_mem_descs; i++) { in nvme_free_host_mem()
1914 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; in nvme_free_host_mem()
1915 size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; in nvme_free_host_mem()
1917 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], in nvme_free_host_mem()
1918 le64_to_cpu(desc->addr), in nvme_free_host_mem()
1922 kfree(dev->host_mem_desc_bufs); in nvme_free_host_mem()
1923 dev->host_mem_desc_bufs = NULL; in nvme_free_host_mem()
1924 dma_free_coherent(dev->dev, in nvme_free_host_mem()
1925 dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), in nvme_free_host_mem()
1926 dev->host_mem_descs, dev->host_mem_descs_dma); in nvme_free_host_mem()
1927 dev->host_mem_descs = NULL; in nvme_free_host_mem()
1928 dev->nr_host_mem_descs = 0; in nvme_free_host_mem()
1941 tmp = (preferred + chunk_size - 1); in __nvme_alloc_host_mem()
1945 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) in __nvme_alloc_host_mem()
1946 max_entries = dev->ctrl.hmmaxd; in __nvme_alloc_host_mem()
1948 descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), in __nvme_alloc_host_mem()
1960 len = min_t(u64, chunk_size, preferred - size); in __nvme_alloc_host_mem()
1961 bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, in __nvme_alloc_host_mem()
1974 dev->nr_host_mem_descs = i; in __nvme_alloc_host_mem()
1975 dev->host_mem_size = size; in __nvme_alloc_host_mem()
1976 dev->host_mem_descs = descs; in __nvme_alloc_host_mem()
1977 dev->host_mem_descs_dma = descs_dma; in __nvme_alloc_host_mem()
1978 dev->host_mem_desc_bufs = bufs; in __nvme_alloc_host_mem()
1982 while (--i >= 0) { in __nvme_alloc_host_mem()
1985 dma_free_attrs(dev->dev, size, bufs[i], in __nvme_alloc_host_mem()
1992 dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, in __nvme_alloc_host_mem()
1995 dev->host_mem_descs = NULL; in __nvme_alloc_host_mem()
1996 return -ENOMEM; in __nvme_alloc_host_mem()
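__nvme_alloc_host_mem() carves the preferred HMB size into chunk_size DMA buffers, one descriptor each; descriptor sizes are recorded in controller-page units, which is why the free path earlier multiplies desc->size by NVME_CTRL_PAGE_SIZE. Sketch of the carving, sizes invented:

#include <stdio.h>
#include <stdint.h>

#define CTRL_PAGE_SIZE 4096u

int main(void)
{
        uint64_t preferred = 32ull << 20;  /* 32 MiB requested */
        uint64_t chunk = 4ull << 20;       /* 4 MiB per descriptor */
        unsigned descs = 0;

        for (uint64_t size = 0; size < preferred; size += chunk) {
                uint64_t len = preferred - size < chunk ? preferred - size : chunk;
                printf("desc %u: %llu pages\n", descs++,
                       (unsigned long long)(len / CTRL_PAGE_SIZE));
        }
        return 0;
}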
2002 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); in nvme_alloc_host_mem()
2008 if (!min || dev->host_mem_size >= min) in nvme_alloc_host_mem()
2014 return -ENOMEM; in nvme_alloc_host_mem()
2020 u64 preferred = (u64)dev->ctrl.hmpre * 4096; in nvme_setup_host_mem()
2021 u64 min = (u64)dev->ctrl.hmmin * 4096; in nvme_setup_host_mem()
2025 if (!dev->ctrl.hmpre) in nvme_setup_host_mem()
2030 dev_warn(dev->ctrl.device, in nvme_setup_host_mem()
2040 if (dev->host_mem_descs) { in nvme_setup_host_mem()
2041 if (dev->host_mem_size >= min) in nvme_setup_host_mem()
2047 if (!dev->host_mem_descs) { in nvme_setup_host_mem()
2049 dev_warn(dev->ctrl.device, in nvme_setup_host_mem()
2054 dev_info(dev->ctrl.device, in nvme_setup_host_mem()
2056 dev->host_mem_size >> ilog2(SZ_1M)); in nvme_setup_host_mem()
2071 ndev->cmbloc, ndev->cmbsz); in cmb_show()
2080 return sysfs_emit(buf, "%u\n", ndev->cmbloc); in cmbloc_show()
2089 return sysfs_emit(buf, "%u\n", ndev->cmbsz); in cmbsz_show()
2098 return sysfs_emit(buf, "%d\n", ndev->hmb); in hmb_show()
2109 return -EINVAL; in hmb_store()
2111 if (new == ndev->hmb) in hmb_store()
2139 if (!dev->cmbsz) in nvme_pci_attrs_are_visible()
2142 if (a == &dev_attr_hmb.attr && !ctrl->hmpre) in nvme_pci_attrs_are_visible()
2145 return a->mode; in nvme_pci_attrs_are_visible()
2169 sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group); in nvme_update_attrs()
2178 struct nvme_dev *dev = affd->priv; in nvme_calc_irq_sets()
2179 unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; in nvme_calc_irq_sets()
2200 nr_read_queues = nrirqs - nr_write_queues; in nvme_calc_irq_sets()
2203 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; in nvme_calc_irq_sets()
2204 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; in nvme_calc_irq_sets()
2205 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; in nvme_calc_irq_sets()
2206 affd->set_size[HCTX_TYPE_READ] = nr_read_queues; in nvme_calc_irq_sets()
2207 affd->nr_sets = nr_read_queues ? 2 : 1; in nvme_calc_irq_sets()
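The elided branches of nvme_calc_irq_sets() reduce to a simple split: with ample vectors, writes keep their requested share and reads take the remainder; with too few, the read set collapses to one queue or disappears entirely. Approximate sketch (branch structure paraphrased, not verbatim):

#include <stdio.h>

static unsigned nr_read_queues(unsigned nrirqs, unsigned nr_write_queues)
{
        if (nrirqs == 1 || !nr_write_queues)
                return 0;                      /* everything shares one set */
        if (nr_write_queues >= nrirqs)
                return 1;                      /* squeezed: single read queue */
        return nrirqs - nr_write_queues;       /* reads take the remainder */
}

int main(void)
{
        printf("%u\n", nr_read_queues(8, 2));  /* 6 read, 2 default/write */
        printf("%u\n", nr_read_queues(2, 4));  /* squeezed: 1 read queue */
        printf("%u\n", nr_read_queues(4, 0));  /* no split requested */
        return 0;
}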
2212 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_setup_irqs()
2222 * left over for non-polled I/O. in nvme_setup_irqs()
2224 poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); in nvme_setup_irqs()
2225 dev->io_queues[HCTX_TYPE_POLL] = poll_queues; in nvme_setup_irqs()
2231 dev->io_queues[HCTX_TYPE_DEFAULT] = 1; in nvme_setup_irqs()
2232 dev->io_queues[HCTX_TYPE_READ] = 0; in nvme_setup_irqs()
2235 * We need interrupts for the admin queue and each non-polled I/O queue, in nvme_setup_irqs()
2240 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) in nvme_setup_irqs()
2241 irq_queues += (nr_io_queues - poll_queues); in nvme_setup_irqs()
2252 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) in nvme_max_io_queues()
2254 return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; in nvme_max_io_queues()
2259 struct nvme_queue *adminq = &dev->queues[0]; in nvme_setup_io_queues()
2260 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_setup_io_queues()
2269 dev->nr_write_queues = write_queues; in nvme_setup_io_queues()
2270 dev->nr_poll_queues = poll_queues; in nvme_setup_io_queues()
2272 nr_io_queues = dev->nr_allocated_queues - 1; in nvme_setup_io_queues()
2273 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); in nvme_setup_io_queues()
2290 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) in nvme_setup_io_queues()
2293 if (dev->cmb_use_sqes) { in nvme_setup_io_queues()
2297 dev->q_depth = result; in nvme_setup_io_queues()
2298 dev->ctrl.sqsize = result - 1; in nvme_setup_io_queues()
2300 dev->cmb_use_sqes = false; in nvme_setup_io_queues()
2309 if (!--nr_io_queues) { in nvme_setup_io_queues()
2310 result = -ENOMEM; in nvme_setup_io_queues()
2314 adminq->q_db = dev->dbs; in nvme_setup_io_queues()
2318 if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) in nvme_setup_io_queues()
2329 result = -EIO; in nvme_setup_io_queues()
2333 dev->num_vecs = result; in nvme_setup_io_queues()
2334 result = max(result - 1, 1); in nvme_setup_io_queues()
2335 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; in nvme_setup_io_queues()
2346 set_bit(NVMEQ_ENABLED, &adminq->flags); in nvme_setup_io_queues()
2347 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues()
2350 if (result || dev->online_queues < 2) in nvme_setup_io_queues()
2353 if (dev->online_queues - 1 < dev->max_qid) { in nvme_setup_io_queues()
2354 nr_io_queues = dev->online_queues - 1; in nvme_setup_io_queues()
2362 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", in nvme_setup_io_queues()
2363 dev->io_queues[HCTX_TYPE_DEFAULT], in nvme_setup_io_queues()
2364 dev->io_queues[HCTX_TYPE_READ], in nvme_setup_io_queues()
2365 dev->io_queues[HCTX_TYPE_POLL]); in nvme_setup_io_queues()
2368 mutex_unlock(&dev->shutdown_lock); in nvme_setup_io_queues()
2375 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_queue_end()
2378 complete(&nvmeq->delete_done); in nvme_del_queue_end()
2385 struct nvme_queue *nvmeq = req->end_io_data; in nvme_del_cq_end()
2388 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); in nvme_del_cq_end()
2395 struct request_queue *q = nvmeq->dev->ctrl.admin_q; in nvme_delete_queue()
2400 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); in nvme_delete_queue()
2408 req->end_io = nvme_del_cq_end; in nvme_delete_queue()
2410 req->end_io = nvme_del_queue_end; in nvme_delete_queue()
2411 req->end_io_data = nvmeq; in nvme_delete_queue()
2413 init_completion(&nvmeq->delete_done); in nvme_delete_queue()
2420 int nr_queues = dev->online_queues - 1, sent = 0; in __nvme_delete_io_queues()
2426 if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) in __nvme_delete_io_queues()
2428 nr_queues--; in __nvme_delete_io_queues()
2432 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; in __nvme_delete_io_queues()
2434 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, in __nvme_delete_io_queues()
2439 sent--; in __nvme_delete_io_queues()
2454 if (dev->io_queues[HCTX_TYPE_POLL]) in nvme_pci_nr_maps()
2456 if (dev->io_queues[HCTX_TYPE_READ]) in nvme_pci_nr_maps()
2463 blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); in nvme_pci_update_nr_queues()
2465 nvme_free_queues(dev, dev->online_queues); in nvme_pci_update_nr_queues()
2470 int result = -ENOMEM; in nvme_pci_enable()
2471 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_pci_enable()
2478 if (readl(dev->bar + NVME_REG_CSTS) == -1) { in nvme_pci_enable()
2479 result = -ENODEV; in nvme_pci_enable()
2485 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll in nvme_pci_enable()
2492 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); in nvme_pci_enable()
2494 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, in nvme_pci_enable()
2496 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); in nvme_pci_enable()
2497 dev->dbs = dev->bar + 4096; in nvme_pci_enable()
2500 * Some Apple controllers require a non-standard SQE size. in nvme_pci_enable()
2504 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) in nvme_pci_enable()
2505 dev->io_sqes = 7; in nvme_pci_enable()
2507 dev->io_sqes = NVME_NVM_IOSQES; in nvme_pci_enable()
2513 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { in nvme_pci_enable()
2514 dev->q_depth = 2; in nvme_pci_enable()
2515 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " in nvme_pci_enable()
2517 dev->q_depth); in nvme_pci_enable()
2518 } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && in nvme_pci_enable()
2519 (pdev->device == 0xa821 || pdev->device == 0xa822) && in nvme_pci_enable()
2520 NVME_CAP_MQES(dev->ctrl.cap) == 0) { in nvme_pci_enable()
2521 dev->q_depth = 64; in nvme_pci_enable()
2522 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " in nvme_pci_enable()
2523 "set queue depth=%u\n", dev->q_depth); in nvme_pci_enable()
2530 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && in nvme_pci_enable()
2531 (dev->q_depth < (NVME_AQ_DEPTH + 2))) { in nvme_pci_enable()
2532 dev->q_depth = NVME_AQ_DEPTH + 2; in nvme_pci_enable()
2533 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", in nvme_pci_enable()
2534 dev->q_depth); in nvme_pci_enable()
2536 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ in nvme_pci_enable()
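The depth and stride used above come straight out of the CAP register: MQES (bits 15:0) is a zero's based maximum queue depth, and DSTRD (bits 35:32) gives the doorbell stride as a power of two in 4-byte units. Decode example with an invented CAP value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cap = 0x00000000000003ffull; /* MQES = 0x3ff, DSTRD = 0 */
        unsigned q_depth = (cap & 0xffff) + 1;
        unsigned db_stride = 1u << ((cap >> 32) & 0xf);

        printf("q_depth = %u, db_stride = %u\n", q_depth, db_stride); /* 1024, 1 */
        return 0;
}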
2556 if (dev->bar) in nvme_dev_unmap()
2557 iounmap(dev->bar); in nvme_dev_unmap()
2558 pci_release_mem_regions(to_pci_dev(dev->dev)); in nvme_dev_unmap()
2563 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_pci_ctrl_is_dead()
2568 if (pdev->error_state != pci_channel_io_normal) in nvme_pci_ctrl_is_dead()
2571 csts = readl(dev->bar + NVME_REG_CSTS); in nvme_pci_ctrl_is_dead()
2577 enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl); in nvme_dev_disable()
2578 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_dev_disable()
2581 mutex_lock(&dev->shutdown_lock); in nvme_dev_disable()
2585 nvme_start_freeze(&dev->ctrl); in nvme_dev_disable()
2591 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); in nvme_dev_disable()
2594 nvme_quiesce_io_queues(&dev->ctrl); in nvme_dev_disable()
2596 if (!dead && dev->ctrl.queue_count > 0) { in nvme_dev_disable()
2598 nvme_disable_ctrl(&dev->ctrl, shutdown); in nvme_dev_disable()
2599 nvme_poll_irqdisable(&dev->queues[0]); in nvme_dev_disable()
2608 nvme_cancel_tagset(&dev->ctrl); in nvme_dev_disable()
2609 nvme_cancel_admin_tagset(&dev->ctrl); in nvme_dev_disable()
2614 * deadlocking blk-mq hot-cpu notifier. in nvme_dev_disable()
2617 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_dev_disable()
2618 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) in nvme_dev_disable()
2619 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_dev_disable()
2621 mutex_unlock(&dev->shutdown_lock); in nvme_dev_disable()
2626 if (!nvme_wait_reset(&dev->ctrl)) in nvme_disable_prepare_reset()
2627 return -EBUSY; in nvme_disable_prepare_reset()
2634 dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, in nvme_setup_prp_pools()
2637 if (!dev->prp_page_pool) in nvme_setup_prp_pools()
2638 return -ENOMEM; in nvme_setup_prp_pools()
2641 dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, in nvme_setup_prp_pools()
2643 if (!dev->prp_small_pool) { in nvme_setup_prp_pools()
2644 dma_pool_destroy(dev->prp_page_pool); in nvme_setup_prp_pools()
2645 return -ENOMEM; in nvme_setup_prp_pools()
2652 dma_pool_destroy(dev->prp_page_pool); in nvme_release_prp_pools()
2653 dma_pool_destroy(dev->prp_small_pool); in nvme_release_prp_pools()
2660 dev->iod_mempool = mempool_create_node(1, in nvme_pci_alloc_iod_mempool()
2663 dev_to_node(dev->dev)); in nvme_pci_alloc_iod_mempool()
2664 if (!dev->iod_mempool) in nvme_pci_alloc_iod_mempool()
2665 return -ENOMEM; in nvme_pci_alloc_iod_mempool()
2671 if (dev->tagset.tags) in nvme_free_tagset()
2672 nvme_remove_io_tag_set(&dev->ctrl); in nvme_free_tagset()
2673 dev->ctrl.tagset = NULL; in nvme_free_tagset()
2682 put_device(dev->dev); in nvme_pci_free_ctrl()
2683 kfree(dev->queues); in nvme_pci_free_ctrl()
2691 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); in nvme_reset_work()
2694 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) { in nvme_reset_work()
2695 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", in nvme_reset_work()
2696 dev->ctrl.state); in nvme_reset_work()
2697 result = -ENODEV; in nvme_reset_work()
2705 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) in nvme_reset_work()
2707 nvme_sync_queues(&dev->ctrl); in nvme_reset_work()
2709 mutex_lock(&dev->shutdown_lock); in nvme_reset_work()
2713 nvme_unquiesce_admin_queue(&dev->ctrl); in nvme_reset_work()
2714 mutex_unlock(&dev->shutdown_lock); in nvme_reset_work()
2717 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the in nvme_reset_work()
2720 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_work()
2721 dev_warn(dev->ctrl.device, in nvme_reset_work()
2723 result = -EBUSY; in nvme_reset_work()
2727 result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend); in nvme_reset_work()
2746 if (dev->online_queues > 1) { in nvme_reset_work()
2747 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
2748 nvme_wait_freeze(&dev->ctrl); in nvme_reset_work()
2751 nvme_unfreeze(&dev->ctrl); in nvme_reset_work()
2753 dev_warn(dev->ctrl.device, "IO queues lost\n"); in nvme_reset_work()
2754 nvme_mark_namespaces_dead(&dev->ctrl); in nvme_reset_work()
2755 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
2756 nvme_remove_namespaces(&dev->ctrl); in nvme_reset_work()
2764 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { in nvme_reset_work()
2765 dev_warn(dev->ctrl.device, in nvme_reset_work()
2767 result = -ENODEV; in nvme_reset_work()
2771 nvme_start_ctrl(&dev->ctrl); in nvme_reset_work()
2775 mutex_unlock(&dev->shutdown_lock); in nvme_reset_work()
2781 dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", in nvme_reset_work()
2783 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_reset_work()
2785 nvme_sync_queues(&dev->ctrl); in nvme_reset_work()
2786 nvme_mark_namespaces_dead(&dev->ctrl); in nvme_reset_work()
2787 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_reset_work()
2788 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); in nvme_reset_work()
2793 *val = readl(to_nvme_dev(ctrl)->bar + off); in nvme_pci_reg_read32()
2799 writel(val, to_nvme_dev(ctrl)->bar + off); in nvme_pci_reg_write32()
2805 *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); in nvme_pci_reg_read64()
2811 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); in nvme_pci_get_address()
2813 return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); in nvme_pci_get_address()
2818 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); in nvme_pci_print_device_info()
2819 struct nvme_subsystem *subsys = ctrl->subsys; in nvme_pci_print_device_info()
2821 dev_err(ctrl->device, in nvme_pci_print_device_info()
2823 pdev->vendor, pdev->device, in nvme_pci_print_device_info()
2824 nvme_strlen(subsys->model, sizeof(subsys->model)), in nvme_pci_print_device_info()
2825 subsys->model, nvme_strlen(subsys->firmware_rev, in nvme_pci_print_device_info()
2826 sizeof(subsys->firmware_rev)), in nvme_pci_print_device_info()
2827 subsys->firmware_rev); in nvme_pci_print_device_info()
2834 return dma_pci_p2pdma_supported(dev->dev); in nvme_pci_supports_pci_p2pdma()
2854 struct pci_dev *pdev = to_pci_dev(dev->dev); in nvme_dev_map()
2857 return -ENODEV; in nvme_dev_map()
2865 return -ENODEV; in nvme_dev_map()
2870 if (pdev->vendor == 0x144d && pdev->device == 0xa802) { in check_vendor_combination_bug()
2876 * 950 PRO 256GB", but it seems to be restricted to two Dell in check_vendor_combination_bug()
2883 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { in check_vendor_combination_bug()
2886 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as in check_vendor_combination_bug()
2887 * within few minutes after bootup on a Coffee Lake board - in check_vendor_combination_bug()
2888 * ASUS PRIME Z370-A in check_vendor_combination_bug()
2891 (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || in check_vendor_combination_bug()
2892 dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) in check_vendor_combination_bug()
2894 } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || in check_vendor_combination_bug()
2895 pdev->device == 0xa808 || pdev->device == 0xa809)) || in check_vendor_combination_bug()
2896 (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { in check_vendor_combination_bug()
2906 } else if (pdev->vendor == 0x2646 && (pdev->device == 0x2263 || in check_vendor_combination_bug()
2907 pdev->device == 0x500f)) { in check_vendor_combination_bug()
2926 unsigned long quirks = id->driver_data; in nvme_pci_alloc_dev()
2927 int node = dev_to_node(&pdev->dev); in nvme_pci_alloc_dev()
2929 int ret = -ENOMEM; in nvme_pci_alloc_dev()
2933 return ERR_PTR(-ENOMEM); in nvme_pci_alloc_dev()
2934 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); in nvme_pci_alloc_dev()
2935 mutex_init(&dev->shutdown_lock); in nvme_pci_alloc_dev()
2937 dev->nr_write_queues = write_queues; in nvme_pci_alloc_dev()
2938 dev->nr_poll_queues = poll_queues; in nvme_pci_alloc_dev()
2939 dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; in nvme_pci_alloc_dev()
2940 dev->queues = kcalloc_node(dev->nr_allocated_queues, in nvme_pci_alloc_dev()
2942 if (!dev->queues) in nvme_pci_alloc_dev()
2945 dev->dev = get_device(&pdev->dev); in nvme_pci_alloc_dev()
2950 acpi_storage_d3(&pdev->dev)) { in nvme_pci_alloc_dev()
2955 dev_info(&pdev->dev, in nvme_pci_alloc_dev()
2959 ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, in nvme_pci_alloc_dev()
2964 if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) in nvme_pci_alloc_dev()
2965 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); in nvme_pci_alloc_dev()
2967 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in nvme_pci_alloc_dev()
2968 dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1); in nvme_pci_alloc_dev()
2969 dma_set_max_seg_size(&pdev->dev, 0xffffffff); in nvme_pci_alloc_dev()
2972 * Limit the max command size to prevent iod->sg allocations going in nvme_pci_alloc_dev()
2975 dev->ctrl.max_hw_sectors = min_t(u32, in nvme_pci_alloc_dev()
2976 NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); in nvme_pci_alloc_dev()
2977 dev->ctrl.max_segments = NVME_MAX_SEGS; in nvme_pci_alloc_dev()
2983 dev->ctrl.max_integrity_segments = 1; in nvme_pci_alloc_dev()
2987 put_device(dev->dev); in nvme_pci_alloc_dev()
2988 kfree(dev->queues); in nvme_pci_alloc_dev()
2997 int result = -ENOMEM; in nvme_probe()
3015 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); in nvme_probe()
3021 result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset, in nvme_probe()
3030 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { in nvme_probe()
3031 dev_warn(dev->ctrl.device, in nvme_probe()
3033 result = -EBUSY; in nvme_probe()
3037 result = nvme_init_ctrl_finish(&dev->ctrl, false); in nvme_probe()
3051 if (dev->online_queues > 1) { in nvme_probe()
3052 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, in nvme_probe()
3057 if (!dev->ctrl.tagset) in nvme_probe()
3058 dev_warn(dev->ctrl.device, "IO queues not created\n"); in nvme_probe()
3060 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { in nvme_probe()
3061 dev_warn(dev->ctrl.device, in nvme_probe()
3063 result = -ENODEV; in nvme_probe()
3069 nvme_start_ctrl(&dev->ctrl); in nvme_probe()
3070 nvme_put_ctrl(&dev->ctrl); in nvme_probe()
3071 flush_work(&dev->ctrl.scan_work); in nvme_probe()
3075 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_probe()
3082 mempool_destroy(dev->iod_mempool); in nvme_probe()
3088 nvme_uninit_ctrl(&dev->ctrl); in nvme_probe()
3089 nvme_put_ctrl(&dev->ctrl); in nvme_probe()
3100 * with ->remove(). in nvme_reset_prepare()
3103 nvme_sync_queues(&dev->ctrl); in nvme_reset_prepare()
3110 if (!nvme_try_sched_reset(&dev->ctrl)) in nvme_reset_done()
3111 flush_work(&dev->ctrl.reset_work); in nvme_reset_done()
3130 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); in nvme_remove()
3134 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); in nvme_remove()
3138 flush_work(&dev->ctrl.reset_work); in nvme_remove()
3139 nvme_stop_ctrl(&dev->ctrl); in nvme_remove()
3140 nvme_remove_namespaces(&dev->ctrl); in nvme_remove()
3146 mempool_destroy(dev->iod_mempool); in nvme_remove()
3149 nvme_uninit_ctrl(&dev->ctrl); in nvme_remove()
3166 struct nvme_ctrl *ctrl = &ndev->ctrl; in nvme_resume()
3168 if (ndev->last_ps == U32_MAX || in nvme_resume()
3169 nvme_set_power_state(ctrl, ndev->last_ps) != 0) in nvme_resume()
3171 if (ctrl->hmpre && nvme_setup_host_mem(ndev)) in nvme_resume()
3183 struct nvme_ctrl *ctrl = &ndev->ctrl; in nvme_suspend()
3184 int ret = -EBUSY; in nvme_suspend()
3186 ndev->last_ps = U32_MAX; in nvme_suspend()
3193 * device does not support any non-default power states, shut down the in nvme_suspend()
3198 * down, so as to allow the platform to achieve its minimum low-power in nvme_suspend()
3201 if (pm_suspend_via_firmware() || !ctrl->npss || in nvme_suspend()
3203 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) in nvme_suspend()
3216 * non-operational power state. in nvme_suspend()
3218 if (ndev->hmb) { in nvme_suspend()
3224 ret = nvme_get_power_state(ctrl, &ndev->last_ps); in nvme_suspend()
3235 ret = nvme_set_power_state(ctrl, ctrl->npss); in nvme_suspend()
3248 ctrl->npss = 0; in nvme_suspend()
3267 return nvme_try_sched_reset(&ndev->ctrl); in nvme_simple_resume()
3294 dev_warn(dev->ctrl.device, in nvme_error_detected()
3296 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) { in nvme_error_detected()
3303 dev_warn(dev->ctrl.device, in nvme_error_detected()
3314 dev_info(dev->ctrl.device, "restart after slot reset\n"); in nvme_slot_reset()
3316 if (!nvme_try_sched_reset(&dev->ctrl)) in nvme_slot_reset()
3317 nvme_unquiesce_io_queues(&dev->ctrl); in nvme_slot_reset()
3325 flush_work(&dev->ctrl.reset_work); in nvme_error_resume()
3472 { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
3474 { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */