
Lines matching "dram-access-quirk" in the NVMe PCIe host driver (drivers/nvme/host/pci.c); fragments grouped by function, gaps marked /* ... */.

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/t10-pi.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
/* io_queue_count_set() */
		return -EINVAL;

/* io_queue_depth_set() */
		return -EINVAL;

/* nvme_dbbuf_size() */
	return dev->nr_allocated_queues * 8 * dev->db_stride;

/* nvme_dbbuf_dma_alloc() */
	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

/* nvme_dbbuf_dma_free() */
	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}

/* nvme_dbbuf_init() */
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];

/* nvme_dbbuf_free() */
	if (!nvmeq->qid)
		return;

	nvmeq->dbbuf_sq_db = NULL;
	nvmeq->dbbuf_cq_db = NULL;
	nvmeq->dbbuf_sq_ei = NULL;
	nvmeq->dbbuf_cq_ei = NULL;

/* nvme_dbbuf_set() */
	if (!dev->dbbuf_dbs)
		return;
	/* ... */
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* ... */
		for (i = 1; i <= dev->online_queues; i++)
			nvme_dbbuf_free(&dev->queues[i]);
	}
/* nvme_dbbuf_need_event() */
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
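/*
 * Illustrative userspace sketch (not part of the driver): the u16
 * wrap-around test above reports whether event_idx fell in the
 * half-open interval (old, new_idx], i.e. whether the controller asked
 * to be notified somewhere between the last doorbell value and the new
 * one.  Assumed standalone demo; compile with any C compiler.
 */
#include <stdio.h>
#include <stdint.h>

static int need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int main(void)
{
	/* event_idx 5 lies in (3, 7]: the doorbell must be rung. */
	printf("%d\n", need_event(5, 7, 3));		/* prints 1 */
	/* event_idx 9 is past new_idx: no notification needed yet. */
	printf("%d\n", need_event(9, 7, 3));		/* prints 0 */
	/* The same test survives 16-bit wrap: 2 lies in (65530, 4]. */
	printf("%d\n", need_event(2, 4, 65530));	/* prints 1 */
	return 0;
}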
/* nvme_pci_npages_prp() */
	return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
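/*
 * Illustrative sketch (assumed standalone demo): each 4 KiB PRP list
 * page holds 512 eight-byte entries, but the last entry of a full page
 * chains to the next list page, hence the "page size minus 8" divisor
 * in nvme_pci_npages_prp() above.
 */
#include <stdio.h>

#define CTRL_PAGE_SIZE 4096

static int npages_prp(unsigned int nprps)
{
	/* DIV_ROUND_UP(8 * nprps, CTRL_PAGE_SIZE - 8) spelled out */
	return (8 * nprps + CTRL_PAGE_SIZE - 8 - 1) / (CTRL_PAGE_SIZE - 8);
}

int main(void)
{
	printf("%d\n", npages_prp(511));	/* 1: fits one list page */
	printf("%d\n", npages_prp(513));	/* 2: needs a chained page */
	return 0;
}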
/* nvme_admin_init_hctx() */
	struct nvme_queue *nvmeq = &dev->queues[0];
	/* ... */
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	hctx->driver_data = nvmeq;

/* nvme_init_hctx() */
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;

/* nvme_init_request() */
	struct nvme_dev *dev = set->driver_data;
	/* ... */
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];
	/* ... */
	iod->nvmeq = nvmeq;
	/* ... */
	nvme_req(req)->ctrl = &dev->ctrl;

/* queue_irq_offset() */
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

/* nvme_pci_map_queues() */
	struct nvme_dev *dev = set->driver_data;
	/* ... */
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			/* ... */
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		/* ... */
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

/* nvme_write_sq_db() */
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;

/*
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 */
	spin_lock(&nvmeq->sq_lock);
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
	       cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	nvme_write_sq_db(nvmeq, write_sq);
	spin_unlock(&nvmeq->sq_lock);

/* nvme_commit_rqs() */
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
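/*
 * Illustrative userspace sketch (assumed demo, not driver code):
 * submissions only bump a software tail; the MMIO doorbell (modeled
 * here as a plain variable) is written when the batch ends or the ring
 * is about to catch up with the last doorbell value, mirroring the
 * shape of nvme_write_sq_db()/nvme_submit_cmd() above.
 */
#include <stdio.h>
#include <stdint.h>

#define Q_DEPTH 8

static uint16_t sq_tail, last_sq_tail, doorbell;

static void write_sq_db(int write_sq)
{
	if (!write_sq) {
		uint16_t next_tail = (sq_tail + 1) % Q_DEPTH;

		if (next_tail != last_sq_tail)
			return;			/* still room: defer the MMIO write */
	}
	doorbell = sq_tail;			/* stands in for writel(..., q_db) */
	last_sq_tail = sq_tail;
}

static void submit_cmd(int last)
{
	sq_tail = (sq_tail + 1) % Q_DEPTH;	/* command would be copied here */
	write_sq_db(last);
}

int main(void)
{
	submit_cmd(0);
	submit_cmd(0);
	submit_cmd(1);				/* end of batch: one doorbell covers three commands */
	printf("tail=%u doorbell=%u\n", sq_tail, doorbell);	/* tail=3 doorbell=3 */
	return 0;
}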
/* nvme_pci_iod_list() */
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));

/* nvme_pci_use_sgls() */
	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
		return false;
	if (!iod->nvmeq->qid)
		return false;

/* nvme_free_prps() */
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	/* ... */
	dma_addr_t dma_addr = iod->first_dma;

	for (i = 0; i < iod->npages; i++) {
		/* ... */
		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
		/* ... */
	}

/* nvme_free_sgls() */
	const int last_sg = SGES_PER_PAGE - 1;
	/* ... */
	dma_addr_t dma_addr = iod->first_dma;

	for (i = 0; i < iod->npages; i++) {
		/* ... */
		dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
		/* ... */
	}

/* nvme_unmap_sg() */
	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
				    rq_dma_dir(req));
	else
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));

/* nvme_unmap_data() */
	if (iod->dma_len) {
		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->nents);
	/* ... */
	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			      iod->first_dma);
	else if (iod->use_sgl)
		nvme_free_sgls(dev, req);
	else
		nvme_free_prps(dev, req);
	mempool_free(iod->sg, dev->iod_mempool);

/* nvme_print_sgl() */
		/* ... */
		i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
		sg_dma_len(sg));

/* nvme_pci_setup_prps() */
	struct scatterlist *sg = iod->sg;
	/* ... */
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	/* ... */
	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
	} else {
		/* ... */
	}

	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}
	/* ... */
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}
	/* ... */
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	/* ... */
	iod->first_dma = prp_dma;
	/* ... */
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
	/* ... */
		dma_len -= NVME_CTRL_PAGE_SIZE;
		/* ... */
		length -= NVME_CTRL_PAGE_SIZE;
	/* ... */
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	/* ... */
bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
	     "Invalid SGL for payload:%d nents:%d\n",
	     blk_rq_payload_bytes(req), iod->nents);
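/*
 * Illustrative sketch (assumed values, not driver code): how a transfer
 * turns into PRP entries, following the arithmetic above.  With
 * NVME_CTRL_PAGE_SIZE = 4096, a 12 KiB buffer that starts 512 bytes
 * into a page uses prp1 for the partial first page; since more than one
 * page remains, prp2 would hold the DMA address of a PRP list whose
 * entries are printed below.
 */
#include <stdio.h>
#include <stdint.h>

#define CTRL_PAGE_SIZE 4096u

int main(void)
{
	uint64_t dma_addr = 0x10000200;		/* offset 0x200 into a page */
	uint32_t length = 12 * 1024;
	uint32_t offset = dma_addr & (CTRL_PAGE_SIZE - 1);

	printf("prp1 = 0x%llx\n", (unsigned long long)dma_addr);
	length -= CTRL_PAGE_SIZE - offset;	/* 3584 bytes land in page 1 */
	dma_addr += CTRL_PAGE_SIZE - offset;	/* now page aligned */

	/* 8704 bytes remain: three page-aligned PRP list entries. */
	while (length) {
		printf("list entry: 0x%llx\n", (unsigned long long)dma_addr);
		dma_addr += CTRL_PAGE_SIZE;
		length -= length < CTRL_PAGE_SIZE ? length : CTRL_PAGE_SIZE;
	}
	return 0;
}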
/* nvme_pci_sgl_set_data() */
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;

/* nvme_pci_sgl_set_seg() */
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}

/* nvme_pci_setup_sgls() */
	struct scatterlist *sg = iod->sg;
	/* ... */
	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}
	/* ... */
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}
	/* ... */
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	/* ... */
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];
			/* ... */
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			/* ... */
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

/* nvme_setup_prp_simple() */
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	else
		cmnd->dptr.prp2 = 0;

/* nvme_setup_sgl_simple() */
	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;

/* nvme_map_data() */
			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
				return nvme_setup_prp_simple(dev, req,
							     &cmnd->rw, &bv);

			if (iod->nvmeq->qid && sgl_threshold &&
			    dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
				return nvme_setup_sgl_simple(dev, req,
							     &cmnd->rw, &bv);
	/* ... */
	iod->dma_len = 0;
	iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sg)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
	if (!iod->nents)
		goto out_free_sg;

	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
				iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
	else
		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
				rq_dma_dir(req), DMA_ATTR_NO_WARN);
	/* ... */
	iod->use_sgl = nvme_pci_use_sgls(dev, req);
	if (iod->use_sgl)
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
	/* ... */
out_free_sg:
	mempool_free(iod->sg, dev->iod_mempool);

/* nvme_map_metadata() */
	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
			rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);

/* nvme_queue_rq() */
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	/* ... */
	struct nvme_command *cmnd = &iod->cmd;
	/* ... */
	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;
	/* ... */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;
	/* ... */
	nvme_submit_cmd(nvmeq, cmnd, bd->last);

/* nvme_pci_complete_rq() */
	struct nvme_dev *dev = iod->nvmeq->dev;

	if (blk_integrity_rq(req))
		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
	/* ... */

/* nvme_cqe_pending() */
	struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;

/* nvme_ring_cq_doorbell() */
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);

/* nvme_queue_tagset() */
	if (!nvmeq->qid)
		return nvmeq->dev->admin_tagset.tags[0];
	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];

/* nvme_handle_cqe() */
	struct nvme_completion *cqe = &nvmeq->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	/* ... */
	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}
	/* ... */
		dev_warn(nvmeq->dev->ctrl.device,
			 "invalid id %d completed on queue %d\n",
			 command_id, le16_to_cpu(cqe->sq_id));
	/* ... */
	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
		nvme_pci_complete_rq(req);

/* nvme_update_cq_head() */
	u32 tmp = nvmeq->cq_head + 1;

	if (tmp == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase ^= 1;
	} else {
		nvmeq->cq_head = tmp;
	}
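/*
 * Illustrative userspace sketch (assumed demo): completion-queue
 * consumption is driven by a phase bit.  Each full pass through the CQ
 * flips the expected phase, so a slot holds a new completion exactly
 * when the low bit of its status matches cq_phase, which is what
 * nvme_cqe_pending() and nvme_update_cq_head() above implement.
 */
#include <stdio.h>
#include <stdint.h>

#define Q_DEPTH 4

int main(void)
{
	uint16_t cq_head = 0, cq_phase = 1;
	int i;

	for (i = 0; i < 10; i++) {
		printf("slot %u expects phase %u\n", cq_head, cq_phase);
		if (++cq_head == Q_DEPTH) {	/* wrap: flip expected phase */
			cq_head = 0;
			cq_phase ^= 1;
		}
	}
	return 0;
}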
/* nvme_process_cq() */
	while (nvme_cqe_pending(nvmeq)) {
		/* ... */
		/*
		 * load-load control dependency between phase and the rest of
		 * the cqe requires a full read memory barrier
		 */
		dma_rmb();
		nvme_handle_cqe(nvmeq, nvmeq->cq_head);
		nvme_update_cq_head(nvmeq);
	}

/* nvme_poll_irqdisable() */
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);

	WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));

	disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
	nvme_process_cq(nvmeq);
	enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));

/* nvme_poll() */
	struct nvme_queue *nvmeq = hctx->driver_data;
	/* ... */
	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->cq_poll_lock);

/* nvme_pci_submit_async_event() */
	struct nvme_queue *nvmeq = &dev->queues[0];

/* adapter_delete_queue() */
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);

/* adapter_alloc_cq() */
	if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
		flags |= NVME_CQ_IRQ_ENABLED;
	/* ... */
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	/* ... */
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	/* ... */
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);

/* adapter_alloc_sq() */
	struct nvme_ctrl *ctrl = &dev->ctrl;
	/* ... */
	/*
	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
	 * set. Since URGENT priority is zeroes, it makes all queues
	 * URGENT.
	 */
	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
		flags |= NVME_SQ_PRIO_MEDIUM;
	/* ... */
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	/* ... */
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	/* ... */
	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);

/* abort_endio() */
	struct nvme_queue *nvmeq = iod->nvmeq;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);

/* nvme_should_reset() */
	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
	/* ... */
	switch (dev->ctrl.state) {
	/* ... */

/* nvme_warn_reset() */
	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
				      &pci_status);
	/* ... */
	dev_warn(dev->ctrl.device, /* ... */);
	/* ... */
	dev_warn(dev->ctrl.device, /* ... */);

/* nvme_timeout() */
	struct nvme_queue *nvmeq = iod->nvmeq;
	struct nvme_dev *dev = nvmeq->dev;
	/* ... */
	u32 csts = readl(dev->bar + NVME_REG_CSTS);
	/* ... */
	if (pci_channel_offline(to_pci_dev(dev->dev)))
		return BLK_EH_RESET_TIMER;
	/* ... */
	nvme_reset_ctrl(&dev->ctrl);
	/* ... */
	if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
		nvme_poll(req->mq_hctx);
	else
		nvme_poll_irqdisable(nvmeq);

	if (blk_mq_request_completed(req)) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, completion polled\n",
			 req->tag, nvmeq->qid);
		return BLK_EH_DONE;
	}
	/* ... */
	switch (dev->ctrl.state) {
	case NVME_CTRL_CONNECTING:
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
		fallthrough;
	case NVME_CTRL_DELETING:
		dev_warn_ratelimited(dev->ctrl.device,
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	/* ... */
	}

	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		/* ... */
		nvme_reset_ctrl(&dev->ctrl);
		return BLK_EH_DONE;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = 1;
	/* ... */
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		 "I/O %d QID %d timeout, aborting\n",
		 req->tag, nvmeq->qid);

	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
			BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);

/* nvme_free_queue() */
	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
			  (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (!nvmeq->sq_cmds)
		return;

	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
	} else {
		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
				  nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	}

/* nvme_free_queues() */
	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
		dev->ctrl.queue_count--;
		nvme_free_queue(&dev->queues[i]);
	}

/*
 * nvme_suspend_queue - put queue into suspended state
 */
	if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
		return 1;
	/* ... */
	nvmeq->dev->online_queues--;
	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		nvme_stop_admin_queue(&nvmeq->dev->ctrl);
	if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
		pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);

/* nvme_suspend_io_queues() */
	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
		nvme_suspend_queue(&dev->queues[i]);

/* nvme_disable_admin_queue() */
	struct nvme_queue *nvmeq = &dev->queues[0];

	if (shutdown)
		nvme_shutdown_ctrl(&dev->ctrl);
	else
		nvme_disable_ctrl(&dev->ctrl);

/* nvme_reap_pending_cqes() */
	for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
		spin_lock(&dev->queues[i].cq_poll_lock);
		nvme_process_cq(&dev->queues[i]);
		spin_unlock(&dev->queues[i].cq_poll_lock);
	}

/* nvme_cmb_qdepth() */
	int q_depth = dev->q_depth;
	/* ... */
	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
		/* ... */
		if (q_depth < 64)
			return -ENOMEM;
	}
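/*
 * Illustrative sketch (assumed numbers, page rounding omitted): how the
 * queue depth is shrunk so all I/O SQs fit inside the controller memory
 * buffer, following nvme_cmb_qdepth() above.  A 1 MiB CMB shared by 16
 * queues of 64-byte entries caps the depth at 1024.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cmb_size = 1 << 20;		/* 1 MiB CMB */
	unsigned int nr_io_queues = 16, entry_size = 64;
	unsigned int q_depth = 2048;		/* requested depth */
	uint64_t q_bytes = (uint64_t)q_depth * entry_size;

	if (q_bytes * nr_io_queues > cmb_size) {
		uint64_t mem_per_q = cmb_size / nr_io_queues;	/* 64 KiB per queue */
		q_depth = mem_per_q / entry_size;		/* 1024 entries */
	}
	printf("q_depth = %u\n", q_depth);	/* prints 1024 */
	return 0;
}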
/* nvme_alloc_sq_cmds() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
		if (nvmeq->sq_cmds) {
			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
							nvmeq->sq_cmds);
			if (nvmeq->sq_dma_addr) {
				set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
				return 0;
			}

			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
		}
	}

	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
				&nvmeq->sq_dma_addr, GFP_KERNEL);
	if (!nvmeq->sq_cmds)
		return -ENOMEM;

/* nvme_alloc_queue() */
	struct nvme_queue *nvmeq = &dev->queues[qid];

	if (dev->ctrl.queue_count > qid)
		return 0;

	nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
	nvmeq->q_depth = depth;
	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
					 &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;
	/* ... */
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->sq_lock);
	spin_lock_init(&nvmeq->cq_poll_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->qid = qid;
	dev->ctrl.queue_count++;

	return 0;

 free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
			  nvmeq->cq_dma_addr);
 free_nvmeq:
	return -ENOMEM;

/* queue_request_irq() */
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}

/* nvme_init_queue() */
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->last_sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
	/* ... */
	dev->online_queues++;

/* nvme_create_queue() */
	struct nvme_dev *dev = nvmeq->dev;
	/* ... */
	clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
	/* ... */
	vector = dev->num_vecs == 1 ? 0 : qid;
	if (polled)
		set_bit(NVMEQ_POLLED, &nvmeq->flags);
	/* ... */
	nvmeq->cq_vector = vector;
	/* ... */
	set_bit(NVMEQ_ENABLED, &nvmeq->flags);
	/* ... */
	dev->online_queues--;

/* nvme_dev_remove_admin() */
	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
		/* ... */
		nvme_start_admin_queue(&dev->ctrl);
		blk_cleanup_queue(dev->ctrl.admin_q);
		blk_mq_free_tag_set(&dev->admin_tagset);
	}

/* nvme_alloc_admin_tags() */
	if (!dev->ctrl.admin_q) {
		dev->admin_tagset.ops = &nvme_mq_admin_ops;
		dev->admin_tagset.nr_hw_queues = 1;

		dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
		dev->admin_tagset.numa_node = dev->ctrl.numa_node;
		dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
		dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
		dev->admin_tagset.driver_data = dev;

		if (blk_mq_alloc_tag_set(&dev->admin_tagset))
			return -ENOMEM;
		dev->ctrl.admin_tagset = &dev->admin_tagset;

		dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
		if (IS_ERR(dev->ctrl.admin_q)) {
			blk_mq_free_tag_set(&dev->admin_tagset);
			dev->ctrl.admin_q = NULL;
			return -ENOMEM;
		}
		if (!blk_get_queue(dev->ctrl.admin_q)) {
			nvme_dev_remove_admin(dev);
			dev->ctrl.admin_q = NULL;
			return -ENODEV;
		}
	} else
		nvme_start_admin_queue(&dev->ctrl);

/* db_bar_size() */
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
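/*
 * Illustrative sketch (assumed values): how much of BAR0 must be mapped
 * to cover every doorbell pair, per db_bar_size() above.  The doorbell
 * registers start at offset 0x1000 (NVME_REG_DBS); each queue (admin
 * plus nr_io_queues) owns an SQ tail and a CQ head doorbell, 4 bytes
 * each, spaced by the controller's doorbell stride.
 */
#include <stdio.h>

#define NVME_REG_DBS 0x1000

static unsigned long db_bar_size(unsigned int nr_io_queues,
				 unsigned int db_stride)
{
	return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * db_stride);
}

int main(void)
{
	/* 64 I/O queues, stride 1: 0x1000 + 65 * 8 = 0x1208 bytes. */
	printf("%#lx\n", db_bar_size(64, 1));
	return 0;
}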
/* nvme_remap_bar() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (size <= dev->bar_mapped_size)
		return 0;
	if (size > pci_resource_len(pdev, 0))
		return -ENOMEM;
	if (dev->bar)
		iounmap(dev->bar);
	dev->bar = ioremap(pci_resource_start(pdev, 0), size);
	if (!dev->bar) {
		dev->bar_mapped_size = 0;
		return -ENOMEM;
	}
	dev->bar_mapped_size = size;
	dev->dbs = dev->bar + NVME_REG_DBS;

/* nvme_pci_configure_admin_queue() */
	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
				NVME_CAP_NSSRC(dev->ctrl.cap) : 0;

	if (dev->subsystem &&
	    (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
		writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);

	result = nvme_disable_ctrl(&dev->ctrl);
	/* ... */
	dev->ctrl.numa_node = dev_to_node(dev->dev);

	nvmeq = &dev->queues[0];
	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	writel(aqa, dev->bar + NVME_REG_AQA);
	lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
	lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

	result = nvme_enable_ctrl(&dev->ctrl);
	/* ... */
	nvmeq->cq_vector = 0;
	/* ... */
	if (result) {
		dev->online_queues--;
		return result;
	}

	set_bit(NVMEQ_ENABLED, &nvmeq->flags);

/* nvme_create_io_queues() */
	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
		if (nvme_alloc_queue(dev, i, dev->q_depth)) {
			ret = -ENOMEM;
			break;
		}
	}

	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
	if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
		rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
				dev->io_queues[HCTX_TYPE_READ];
	} else {
		rw_queues = max;
	}

	for (i = dev->online_queues; i <= max; i++) {
		bool polled = i > rw_queues;

		ret = nvme_create_queue(&dev->queues[i], i, polled);
		if (ret)
			break;
	}

/* nvme_cmb_show() */
	/* ... */
		       ndev->cmbloc, ndev->cmbsz);

/* nvme_cmb_size_unit() */
	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;

/* nvme_cmb_size() */
	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;

/* nvme_map_cmb() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	/* ... */
	if (dev->cmb_size)
		return;

	if (NVME_CAP_CMBS(dev->ctrl.cap))
		writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);

	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
	if (!dev->cmbsz)
		return;
	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
	/* ... */
	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
	bar = NVME_CMB_BIR(dev->cmbloc);
	/* ... */
	if (NVME_CAP_CMBS(dev->ctrl.cap)) {
		lo_hi_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
			     (pci_bus_address(pdev, bar) + offset),
			     dev->bar + NVME_REG_CMBMSC);
	}
	/* ... */
	if (size > bar_size - offset)
		size = bar_size - offset;

	if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
		dev_warn(dev->ctrl.device,
			 "failed to register the CMB\n");
		return;
	}

	dev->cmb_size = size;
	dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);

	if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
			(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
		pci_p2pmem_publish(pdev, true);

	if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
				    &dev_attr_cmb.attr, NULL))
		dev_warn(dev->ctrl.device,
			 "failed to add sysfs attribute for CMB\n");

/* nvme_release_cmb() */
	if (dev->cmb_size) {
		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
					     &dev_attr_cmb.attr, NULL);
		dev->cmb_size = 0;
	}

/* nvme_set_host_mem() */
	u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT;
	u64 dma_addr = dev->host_mem_descs_dma;
	/* ... */
	c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);

	ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
	if (ret) {
		dev_warn(dev->ctrl.device,
			 /* ... */);
	}

/* nvme_free_host_mem() */
	for (i = 0; i < dev->nr_host_mem_descs; i++) {
		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
		size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE;

		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
			       le64_to_cpu(desc->addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}

	kfree(dev->host_mem_desc_bufs);
	dev->host_mem_desc_bufs = NULL;
	dma_free_coherent(dev->dev, dev->host_mem_descs_size,
			  dev->host_mem_descs, dev->host_mem_descs_dma);
	dev->host_mem_descs = NULL;
	dev->host_mem_descs_size = 0;
	dev->nr_host_mem_descs = 0;

/* __nvme_alloc_host_mem() */
	tmp = (preferred + chunk_size - 1);
	do_div(tmp, chunk_size);
	max_entries = tmp;

	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
		max_entries = dev->ctrl.hmmaxd;
	/* ... */
	descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma,
				   GFP_KERNEL);
	/* ... */
		len = min_t(u64, chunk_size, preferred - size);
		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	/* ... */
	dev->nr_host_mem_descs = i;
	dev->host_mem_size = size;
	dev->host_mem_descs = descs;
	dev->host_mem_descs_dma = descs_dma;
	dev->host_mem_descs_size = descs_size;
	dev->host_mem_desc_bufs = bufs;
	return 0;

out_free_bufs:
	while (--i >= 0) {
		size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;

		dma_free_attrs(dev->dev, size, bufs[i],
			       le64_to_cpu(descs[i].addr),
			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
	}
	/* ... */
	dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
out:
	dev->host_mem_descs = NULL;
	return -ENOMEM;

/* nvme_alloc_host_mem() */
	u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
	/* ... */
			if (!min || dev->host_mem_size >= min)
				return 0;
	/* ... */
	return -ENOMEM;

/* nvme_setup_host_mem() */
	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
	u64 min = (u64)dev->ctrl.hmmin * 4096;
	/* ... */
	if (min > max) {
		dev_warn(dev->ctrl.device,
			 "min host memory (%lld MiB) above limit (%d MiB).\n",
			 min >> ilog2(SZ_1M), max_host_mem_size_mb);
		nvme_free_host_mem(dev);
		return 0;
	}

	/*
	 * If we already have a buffer allocated check if we can reuse it.
	 */
	if (dev->host_mem_descs) {
		if (dev->host_mem_size >= min)
			enable_bits |= NVME_HOST_MEM_RETURN;
		else
			nvme_free_host_mem(dev);
	}

	if (!dev->host_mem_descs) {
		if (nvme_alloc_host_mem(dev, min, preferred)) {
			dev_warn(dev->ctrl.device,
				 "failed to allocate host memory buffer.\n");
			return 0; /* controller must work without HMB */
		}

		dev_info(dev->ctrl.device,
			 "allocated %lld MiB host memory buffer.\n",
			 dev->host_mem_size >> ilog2(SZ_1M));
	}

/* nvme_calc_irq_sets() */
	struct nvme_dev *dev = affd->priv;
	unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
	/* ... */
	} else {
		nr_read_queues = nrirqs - nr_write_queues;
	}

	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
	affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
	dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
	affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
	affd->nr_sets = nr_read_queues ? 2 : 1;
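/*
 * Illustrative sketch (assumed demo; the branch structure is simplified
 * relative to the full driver logic): dividing the granted interrupt
 * vectors between the default (write) and read queue maps, following
 * the shape of nvme_calc_irq_sets() above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int nrirqs = 8, nr_write_queues = 3, nr_read_queues;

	if (nrirqs == 1 || !nr_write_queues)
		nr_read_queues = 0;		/* one set serves everything */
	else if (nr_write_queues >= nrirqs)
		nr_read_queues = 1;		/* leave at least one read queue */
	else
		nr_read_queues = nrirqs - nr_write_queues;

	printf("default=%u read=%u\n", nrirqs - nr_read_queues, nr_read_queues);
	return 0;				/* prints: default=3 read=5 */
}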
/* nvme_setup_irqs() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	/* ... */
	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 */
	poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1);
	dev->io_queues[HCTX_TYPE_POLL] = poll_queues;

	/*
	 * Initialize for the single interrupt case, will be updated in
	 * nvme_calc_irq_sets().
	 */
	dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
	dev->io_queues[HCTX_TYPE_READ] = 0;

	/*
	 * We need interrupts for the admin queue and each non-polled I/O queue,
	 * but some Apple controllers require all queues to use the first
	 * vector.
	 */
	irq_queues = 1;
	if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
		irq_queues += (nr_io_queues - poll_queues);

/* nvme_max_io_queues() */
	return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;

/* nvme_setup_io_queues() */
	struct nvme_queue *adminq = &dev->queues[0];
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	/* ... */
	dev->nr_write_queues = write_queues;
	dev->nr_poll_queues = poll_queues;
	/* ... */
	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
		nr_io_queues = 1;
	else
		nr_io_queues = min(nvme_max_io_queues(dev),
				   dev->nr_allocated_queues - 1);

	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
	/* ... */
	clear_bit(NVMEQ_ENABLED, &adminq->flags);

	if (dev->cmb_use_sqes) {
		result = nvme_cmb_qdepth(dev, nr_io_queues,
				sizeof(struct nvme_command));
		if (result > 0)
			dev->q_depth = result;
		else
			dev->cmb_use_sqes = false;
	}

	do {
		size = db_bar_size(dev, nr_io_queues);
		result = nvme_remap_bar(dev, size);
		if (!result)
			break;
		if (!--nr_io_queues)
			return -ENOMEM;
	} while (1);
	adminq->q_db = dev->dbs;
	/* ... */
	result = nvme_setup_irqs(dev, nr_io_queues);
	if (result <= 0)
		return -EIO;

	dev->num_vecs = result;
	result = max(result - 1, 1);
	dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
	/* ... */
	set_bit(NVMEQ_ENABLED, &adminq->flags);

	result = nvme_create_io_queues(dev);
	if (result || dev->online_queues < 2)
		return result;

	if (dev->online_queues - 1 < dev->max_qid) {
		nr_io_queues = dev->online_queues - 1;
		/* ... */
	}
	dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
		 dev->io_queues[HCTX_TYPE_DEFAULT],
		 dev->io_queues[HCTX_TYPE_READ],
		 dev->io_queues[HCTX_TYPE_POLL]);

/* nvme_del_queue_end() */
	struct nvme_queue *nvmeq = req->end_io_data;
	/* ... */
	complete(&nvmeq->delete_done);

/* nvme_del_cq_end() */
	struct nvme_queue *nvmeq = req->end_io_data;

	if (error)
		set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);

/* nvme_delete_queue() */
	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
	/* ... */
	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
	/* ... */
	req->end_io_data = nvmeq;

	init_completion(&nvmeq->delete_done);

/* __nvme_disable_io_queues() */
	int nr_queues = dev->online_queues - 1, sent = 0;
	/* ... */
	while (nr_queues > 0) {
		if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
			break;
		nr_queues--;
		sent++;
	}
	while (sent) {
		struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];

		timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
				timeout);
		/* ... */
		sent--;
		/* ... */
	}

/* nvme_dev_add() */
	if (!dev->ctrl.tagset) {
		dev->tagset.ops = &nvme_mq_ops;
		dev->tagset.nr_hw_queues = dev->online_queues - 1;
		dev->tagset.nr_maps = 2; /* default + read */
		if (dev->io_queues[HCTX_TYPE_POLL])
			dev->tagset.nr_maps++;
		dev->tagset.timeout = NVME_IO_TIMEOUT;
		dev->tagset.numa_node = dev->ctrl.numa_node;
		dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
						BLK_MQ_MAX_DEPTH) - 1;
		dev->tagset.cmd_size = sizeof(struct nvme_iod);
		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
		dev->tagset.driver_data = dev;

		/*
		 * Some Apple controllers require tags to be unique across the
		 * admin and I/O queues, so reserve the first 32 tags of the
		 * I/O queue.
		 */
		if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
			dev->tagset.reserved_tags = NVME_AQ_DEPTH;

		ret = blk_mq_alloc_tag_set(&dev->tagset);
		if (ret) {
			dev_warn(dev->ctrl.device,
				 "IO queues tagset allocation failed %d\n", ret);
			return;
		}
		dev->ctrl.tagset = &dev->tagset;
	} else {
		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);

		/* Free previously allocated queues that are no longer usable */
		nvme_free_queues(dev, dev->online_queues);
	}
/* nvme_pci_enable() */
	int result = -ENOMEM;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	/* ... */
	if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
		goto disable;

	if (readl(dev->bar + NVME_REG_CSTS) == -1) {
		result = -ENODEV;
		goto disable;
	}

	/*
	 * Some devices and/or platforms don't advertise or work with INTx
	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
	 * adjust this later.
	 */
	/* ... */
	dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);

	dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
				io_queue_depth);
	dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
	dev->dbs = dev->bar + 4096;

	/*
	 * Some Apple controllers require a non-standard SQE size.
	 * Interestingly they also seem to ignore the CC:IOSQES register
	 * so we don't bother updating it here.
	 */
	if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
		dev->io_sqes = 7;
	else
		dev->io_sqes = NVME_NVM_IOSQES;
	/* ... */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
		dev->q_depth = 2;
		dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
			"set queue depth=%u to work around controller resets\n",
			dev->q_depth);
	} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
		   (pdev->device == 0xa821 || pdev->device == 0xa822) &&
		   NVME_CAP_MQES(dev->ctrl.cap) == 0) {
		dev->q_depth = 64;
		dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
			"set queue depth=%u\n", dev->q_depth);
	}

	/*
	 * Controllers with the shared tags quirk need the IO queue to be
	 * big enough so that we get 32 tags for the admin queue
	 */
	if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
	    (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
		dev->q_depth = NVME_AQ_DEPTH + 2;
		dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
			 dev->q_depth);
	}

/* nvme_dev_unmap() */
	if (dev->bar)
		iounmap(dev->bar);
	pci_release_mem_regions(to_pci_dev(dev->dev));

/* nvme_pci_disable() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

/* nvme_dev_disable() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	mutex_lock(&dev->shutdown_lock);
	if (pci_is_enabled(pdev)) {
		u32 csts = readl(dev->bar + NVME_REG_CSTS);

		if (dev->ctrl.state == NVME_CTRL_LIVE ||
		    dev->ctrl.state == NVME_CTRL_RESETTING) {
			/* ... */
			nvme_start_freeze(&dev->ctrl);
		}
		dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) ||
			pdev->error_state != pci_channel_io_normal);
	}
	/* ... */
	nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);

	nvme_stop_queues(&dev->ctrl);

	if (!dead && dev->ctrl.queue_count > 0) {
		nvme_disable_io_queues(dev);
		nvme_disable_admin_queue(dev, shutdown);
	}
	nvme_suspend_io_queues(dev);
	nvme_suspend_queue(&dev->queues[0]);
	nvme_pci_disable(dev);
	nvme_reap_pending_cqes(dev);

	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
	blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
	blk_mq_tagset_wait_completed_request(&dev->tagset);
	blk_mq_tagset_wait_completed_request(&dev->admin_tagset);

	/*
	 * The driver will not be starting up queues again if shutting down so
	 * must flush all entered requests to their failed completion to avoid
	 * deadlocking blk-mq hot-cpu notifier.
	 */
	if (shutdown) {
		nvme_start_queues(&dev->ctrl);
		if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
			nvme_start_admin_queue(&dev->ctrl);
	}
	mutex_unlock(&dev->shutdown_lock);

/* nvme_disable_prepare_reset() */
	if (!nvme_wait_reset(&dev->ctrl))
		return -EBUSY;

/* nvme_setup_prp_pools() */
	dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
						NVME_CTRL_PAGE_SIZE,
						NVME_CTRL_PAGE_SIZE, 0);
	if (!dev->prp_page_pool)
		return -ENOMEM;

	/* Optimisation for I/Os between 4k and 128k */
	dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
						256, 256, 0);
	if (!dev->prp_small_pool) {
		dma_pool_destroy(dev->prp_page_pool);
		return -ENOMEM;
	}
/* nvme_release_prp_pools() */
	dma_pool_destroy(dev->prp_page_pool);
	dma_pool_destroy(dev->prp_small_pool);

/* nvme_pci_alloc_iod_mempool() */
	dev->iod_mempool = mempool_create_node(1,
			mempool_kmalloc, mempool_kfree,
			(void *)alloc_size, GFP_KERNEL,
			dev_to_node(dev->dev));
	if (!dev->iod_mempool)
		return -ENOMEM;

/* nvme_free_tagset() */
	if (dev->tagset.tags)
		blk_mq_free_tag_set(&dev->tagset);
	dev->ctrl.tagset = NULL;

/* nvme_pci_free_ctrl() */
	if (dev->ctrl.admin_q)
		blk_put_queue(dev->ctrl.admin_q);
	free_opal_dev(dev->ctrl.opal_dev);
	mempool_destroy(dev->iod_mempool);
	put_device(dev->dev);
	kfree(dev->queues);

/* nvme_remove_dead_ctrl() */
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	nvme_get_ctrl(&dev->ctrl);
	/* ... */
	nvme_kill_queues(&dev->ctrl);
	if (!queue_work(nvme_wq, &dev->remove_work))
		nvme_put_ctrl(&dev->ctrl);

/* nvme_reset_work() */
	bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
	/* ... */
	if (dev->ctrl.state != NVME_CTRL_RESETTING) {
		dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
			 dev->ctrl.state);
		result = -ENODEV;
		goto out;
	}

	/*
	 * If we're called to reset a live controller first shut it down before
	 * moving on.
	 */
	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
		nvme_dev_disable(dev, false);
	nvme_sync_queues(&dev->ctrl);

	mutex_lock(&dev->shutdown_lock);
	/* ... */
	dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);

	/*
	 * Limit the max command size to prevent iod->sg allocations going
	 * over a single page.
	 */
	dev->ctrl.max_hw_sectors = min_t(u32,
		NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9);
	dev->ctrl.max_segments = NVME_MAX_SEGS;
	/* ... */
	dma_set_max_seg_size(dev->dev, 0xffffffff);
	/* ... */
	mutex_unlock(&dev->shutdown_lock);

	/*
	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
	 * initializing procedure here.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
			 "failed to mark controller CONNECTING\n");
		result = -EBUSY;
		goto out;
	}
	/* ... */
	dev->ctrl.max_integrity_segments = 1;

	result = nvme_init_identify(&dev->ctrl);
	if (result)
		goto out;

	if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
		if (!dev->ctrl.opal_dev)
			dev->ctrl.opal_dev =
				init_opal_dev(&dev->ctrl, &nvme_sec_submit);
		else if (was_suspend)
			opal_unlock_from_suspend(dev->ctrl.opal_dev);
	} else {
		free_opal_dev(dev->ctrl.opal_dev);
		dev->ctrl.opal_dev = NULL;
	}

	if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
		result = nvme_dbbuf_dma_alloc(dev);
		if (result)
			dev_warn(dev->dev,
				 "unable to allocate dma for dbbuf\n");
	}

	if (dev->ctrl.hmpre) {
		result = nvme_setup_host_mem(dev);
		if (result < 0)
			goto out;
	}
	/* ... */
	if (dev->online_queues < 2) {
		dev_warn(dev->ctrl.device, "IO queues not created\n");
		nvme_kill_queues(&dev->ctrl);
		nvme_remove_namespaces(&dev->ctrl);
		/* ... */
	} else {
		nvme_start_queues(&dev->ctrl);
		nvme_wait_freeze(&dev->ctrl);
		/* ... */
		nvme_unfreeze(&dev->ctrl);
	}

	/*
	 * If only admin queue live, keep it to do further investigation or
	 * recovery.
	 */
	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(dev->ctrl.device,
			 "failed to mark controller live state\n");
		result = -ENODEV;
		goto out;
	}

	nvme_start_ctrl(&dev->ctrl);
	return;

 out_unlock:
	mutex_unlock(&dev->shutdown_lock);
 out:
	/* ... */
	dev_warn(dev->ctrl.device,
		 "Removing after probe failure status: %d\n", result);
	/* ... */

/* nvme_remove_dead_ctrl_work() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_get_drvdata(pdev))
		device_release_driver(&pdev->dev);
	nvme_put_ctrl(&dev->ctrl);

/* nvme_pci_reg_read32() */
	*val = readl(to_nvme_dev(ctrl)->bar + off);
	return 0;

/* nvme_pci_reg_write32() */
	writel(val, to_nvme_dev(ctrl)->bar + off);
	return 0;

/* nvme_pci_reg_read64() */
	*val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
	return 0;

/* nvme_pci_get_address() */
	struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);

	return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));

/* nvme_dev_map() */
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pci_request_mem_regions(pdev, "nvme"))
		return -ENODEV;
	/* ... */
	return -ENODEV;

/* check_vendor_combination_bug() */
	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
		/* ... */
	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
		/*
		 * Samsung SSD 960 EVO drops off the PCIe bus after system
		 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
		 * within few minutes after bootup on a Coffee Lake board -
		 * ASUS PRIME Z370-A
		 */
		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
		    (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
		     dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
			return NVME_QUIRK_NO_APST;
	} else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
		    pdev->device == 0xa808 || pdev->device == 0xa809)) ||
		   (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
		/* ... */
	}

/* nvme_async_probe() */
	flush_work(&dev->ctrl.reset_work);
	flush_work(&dev->ctrl.scan_work);
	nvme_put_ctrl(&dev->ctrl);

/* nvme_pci_alloc_dev() */
	unsigned long quirks = id->driver_data;
	int node = dev_to_node(&pdev->dev);
	/* ... */
	int ret = -ENOMEM;
	/* ... */
	if (!dev)
		return ERR_PTR(-ENOMEM);
	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
	mutex_init(&dev->shutdown_lock);

	dev->nr_write_queues = write_queues;
	dev->nr_poll_queues = poll_queues;
	dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
	dev->queues = kcalloc_node(dev->nr_allocated_queues,
			sizeof(struct nvme_queue), GFP_KERNEL, node);
	if (!dev->queues)
		goto out_free_dev;

	dev->dev = get_device(&pdev->dev);
	/* ... */
	if (!noacpi && acpi_storage_d3(&pdev->dev)) {
		/*
		 * Some systems use a bios work around to ask for D3 on
		 * platforms that support kernel managed suspend.
		 */
		dev_info(&pdev->dev,
			 "platform quirk: setting simple suspend\n");
		quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
	}
	ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
			     quirks);
	/* ... */
out_put_device:
	put_device(dev->dev);
	kfree(dev->queues);

/* nvme_probe() */
	int result = -ENOMEM;
	/* ... */
	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
	/* ... */
	nvme_reset_ctrl(&dev->ctrl);
	/* ... */
	nvme_uninit_ctrl(&dev->ctrl);

/* nvme_reset_prepare() */
	/* ... we do not race with ->remove(). */
	nvme_disable_prepare_reset(dev, false);
	nvme_sync_queues(&dev->ctrl);

/* nvme_reset_done() */
	if (!nvme_try_sched_reset(&dev->ctrl))
		flush_work(&dev->ctrl.reset_work);

/* nvme_remove() */
	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
	/* ... */
	if (!pci_device_is_present(pdev)) {
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
		nvme_dev_disable(dev, true);
	}

	flush_work(&dev->ctrl.reset_work);
	nvme_stop_ctrl(&dev->ctrl);
	nvme_remove_namespaces(&dev->ctrl);
	/* ... */
	nvme_uninit_ctrl(&dev->ctrl);

/* nvme_resume() */
	struct nvme_ctrl *ctrl = &ndev->ctrl;

	if (ndev->last_ps == U32_MAX ||
	    nvme_set_power_state(ctrl, ndev->last_ps) != 0)
		return nvme_try_sched_reset(&ndev->ctrl);

/* nvme_suspend() */
	struct nvme_ctrl *ctrl = &ndev->ctrl;
	int ret = -EBUSY;

	ndev->last_ps = U32_MAX;

	/*
	 * The platform does not remove power for a kernel managed suspend so
	 * use host managed nvme power settings for lowest idle power if
	 * possible. This should have quicker resume latency than a full device
	 * shutdown.  But if the firmware is involved after the suspend or the
	 * device does not support any non-default power states, shut down the
	 * device fully.
	 *
	 * If ASPM is not enabled for the device, shut down the device and allow
	 * the PCI bus layer to put it into D3 in order to take the PCIe link
	 * down, so as to allow the platform to achieve its minimum low-power
	 * state (which may not be possible if the link is up).
	 *
	 * If a host memory buffer is enabled, shut down the device as the NVMe
	 * specification allows the device to access the host memory buffer in
	 * host DRAM from all power states, but hosts will fail access to DRAM
	 * after S3.
	 */
	if (pm_suspend_via_firmware() || !ctrl->npss ||
	    /* ... */
	    ndev->nr_host_mem_descs ||
	    (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
		return nvme_disable_prepare_reset(ndev, true);
	/* ... */
	if (ctrl->state != NVME_CTRL_LIVE)
		goto unfreeze;

	ret = nvme_get_power_state(ctrl, &ndev->last_ps);
	/* ... */
	ret = nvme_set_power_state(ctrl, ctrl->npss);
	/* ... */
		ctrl->npss = 0;

/* nvme_simple_resume() */
	return nvme_try_sched_reset(&ndev->ctrl);

/* nvme_error_detected() */
		dev_warn(dev->ctrl.device,
			 "frozen state error detected, reset controller\n");
	/* ... */
		dev_warn(dev->ctrl.device,
			 "failure state error detected, request disconnect\n");

/* nvme_slot_reset() */
	dev_info(dev->ctrl.device, "restart after slot reset\n");
	/* ... */
	nvme_reset_ctrl(&dev->ctrl);

/* nvme_error_resume() */
	flush_work(&dev->ctrl.reset_work);