Lines Matching refs:nvmeq
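
This is an identifier cross-reference: each entry below gives a source line number in what appears to be the early bio-based NVMe block driver, the text of that line, the enclosing function, and whether nvmeq is a function argument or a local variable there. Read together, the entries outline the driver's per-queue state, so a hypothetical reconstruction of struct nvme_queue is sketched first; member types and ordering are inferred only from the referenced lines and are not the driver's actual definition. Short sketches of the other mechanisms these lines touch (the command-id allocator, the completion-queue walk, and the bio congestion path) appear after the relevant groups of entries and at the end of the listing.

/*
 * Hypothetical struct nvme_queue, reconstructed only from the fields the
 * listing below references; the types are guesses.
 */
struct nvme_queue {
	struct device *q_dmadev;		/* DMA device used for dma_map_sg()/dma_free_coherent() */
	struct nvme_dev *dev;			/* owning controller: prp pools, db_stride, stripe_size, dbs[] */
	spinlock_t q_lock;			/* serialises SQ tail and CQ head updates */
	struct nvme_command *sq_cmds;		/* submission-queue entries (DMA coherent, line 1039) */
	volatile struct nvme_completion *cqes;	/* completion-queue entries; the (void *) casts at
						 * lines 992 and 1037 suggest a volatile qualifier */
	dma_addr_t sq_dma_addr, cq_dma_addr;	/* bus addresses written to ASQ/ACQ or to create-queue commands */
	wait_queue_head_t sq_full;		/* woken whenever a command id is freed (line 210) */
	wait_queue_t sq_cong_wait;		/* parks nvme_thread on sq_full (line 1050) */
	struct bio_list sq_cong;		/* bios held back while the queue is congested */
	u32 __iomem *q_db;			/* doorbell registers, &dev->dbs[qid << (db_stride + 1)] (line 1052) */
	u16 q_depth;
	u16 cq_vector;				/* index into dev->entry[] for the IRQ vector (line 1001) */
	u16 sq_head, sq_tail, cq_head;
	u8 cq_phase;				/* expected phase tag of the next valid CQE */
	unsigned long cmdid_data[];		/* command-id bitmap, followed by the nvme_cmd_info
						 * array returned by nvme_cmd_info() (line 115) */
};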

113 static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)  in nvme_cmd_info()  argument
115 return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)]; in nvme_cmd_info()
133 static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx, in alloc_cmdid() argument
136 int depth = nvmeq->q_depth - 1; in alloc_cmdid()
137 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); in alloc_cmdid()
141 cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth); in alloc_cmdid()
144 } while (test_and_set_bit(cmdid, nvmeq->cmdid_data)); in alloc_cmdid()
152 static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx, in alloc_cmdid_killable() argument
156 wait_event_killable(nvmeq->sq_full, in alloc_cmdid_killable()
157 (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0); in alloc_cmdid_killable()
194 static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid, in free_cmdid() argument
198 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); in free_cmdid()
200 if (cmdid >= nvmeq->q_depth) { in free_cmdid()
209 clear_bit(cmdid, nvmeq->cmdid_data); in free_cmdid()
210 wake_up(&nvmeq->sq_full); in free_cmdid()
214 static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid, in cancel_cmdid() argument
218 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); in cancel_cmdid()
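
The alloc_cmdid()/free_cmdid()/cancel_cmdid() entries above describe a per-queue command-id allocator: the front of cmdid_data is a bitmap covering up to q_depth - 1 ids, and nvme_cmd_info() (line 115) returns the per-id bookkeeping array stored immediately after that bitmap. A minimal sketch of the allocate/free cycle, with the bookkeeping, locking and timeout handling omitted:

/*
 * Illustration only, distilled from lines 133-218 above; not the driver's
 * code. The nvme_cmd_info fields are not shown in the listing, so they are
 * left as a comment.
 */
static int sketch_alloc_cmdid(struct nvme_queue *nvmeq)
{
	int depth = nvmeq->q_depth - 1;	/* presumably q_depth - 1 because a ring of depth N
					 * holds at most N - 1 outstanding entries */
	int cmdid;

	do {
		cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
		if (cmdid >= depth)
			return -EBUSY;	/* caller fails the request or parks it on sq_cong */
	} while (test_and_set_bit(cmdid, nvmeq->cmdid_data));

	/* fill in the matching nvme_cmd_info slot (context, handler, timeout) here */
	return cmdid;
}

static void sketch_free_cmdid(struct nvme_queue *nvmeq, int cmdid)
{
	clear_bit(cmdid, nvmeq->cmdid_data);
	wake_up(&nvmeq->sq_full);	/* lets alloc_cmdid_killable() waiters retry (lines 156-157) */
}
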
232 void put_nvmeq(struct nvme_queue *nvmeq) in put_nvmeq() argument
244 static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) in nvme_submit_cmd() argument
248 spin_lock_irqsave(&nvmeq->q_lock, flags); in nvme_submit_cmd()
249 tail = nvmeq->sq_tail; in nvme_submit_cmd()
250 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); in nvme_submit_cmd()
251 if (++tail == nvmeq->q_depth) in nvme_submit_cmd()
253 writel(tail, nvmeq->q_db); in nvme_submit_cmd()
254 nvmeq->sq_tail = tail; in nvme_submit_cmd()
255 spin_unlock_irqrestore(&nvmeq->q_lock, flags); in nvme_submit_cmd()
498 static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, in nvme_split_and_submit() argument
505 if (bio_list_empty(&nvmeq->sq_cong)) in nvme_split_and_submit()
506 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); in nvme_split_and_submit()
507 bio_list_add(&nvmeq->sq_cong, &bp->b1); in nvme_split_and_submit()
508 bio_list_add(&nvmeq->sq_cong, &bp->b2); in nvme_split_and_submit()
517 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, in nvme_map_bio() argument
524 if (nvmeq->dev->stripe_size) in nvme_map_bio()
525 split_len = nvmeq->dev->stripe_size - in nvme_map_bio()
526 ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1)); in nvme_map_bio()
534 return nvme_split_and_submit(bio, nvmeq, i, in nvme_map_bio()
544 return nvme_split_and_submit(bio, nvmeq, i, split_len, in nvme_map_bio()
551 if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0) in nvme_map_bio()
563 static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_discard() argument
567 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; in nvme_submit_discard()
569 range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC, in nvme_submit_discard()
589 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_discard()
590 nvmeq->sq_tail = 0; in nvme_submit_discard()
591 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_submit_discard()
596 static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_flush() argument
599 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; in nvme_submit_flush()
606 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_flush()
607 nvmeq->sq_tail = 0; in nvme_submit_flush()
608 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_submit_flush()
613 int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns) in nvme_submit_flush_data() argument
615 int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH, in nvme_submit_flush_data()
620 return nvme_submit_flush(nvmeq, ns, cmdid); in nvme_submit_flush_data()
626 static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_bio_queue() argument
638 result = nvme_submit_flush_data(nvmeq, ns); in nvme_submit_bio_queue()
650 cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT); in nvme_submit_bio_queue()
655 result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid); in nvme_submit_bio_queue()
661 return nvme_submit_flush(nvmeq, ns, cmdid); in nvme_submit_bio_queue()
673 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; in nvme_submit_bio_queue()
684 result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs); in nvme_submit_bio_queue()
691 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, in nvme_submit_bio_queue()
698 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_bio_queue()
699 nvmeq->sq_tail = 0; in nvme_submit_bio_queue()
700 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_submit_bio_queue()
705 free_cmdid(nvmeq, cmdid, NULL); in nvme_submit_bio_queue()
707 nvme_free_iod(nvmeq->dev, iod); in nvme_submit_bio_queue()
715 struct nvme_queue *nvmeq = get_nvmeq(ns->dev); in nvme_make_request() local
718 spin_lock_irq(&nvmeq->q_lock); in nvme_make_request()
719 if (bio_list_empty(&nvmeq->sq_cong)) in nvme_make_request()
720 result = nvme_submit_bio_queue(nvmeq, ns, bio); in nvme_make_request()
722 if (bio_list_empty(&nvmeq->sq_cong)) in nvme_make_request()
723 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); in nvme_make_request()
724 bio_list_add(&nvmeq->sq_cong, bio); in nvme_make_request()
727 spin_unlock_irq(&nvmeq->q_lock); in nvme_make_request()
728 put_nvmeq(nvmeq); in nvme_make_request()
731 static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq) in nvme_process_cq() argument
735 head = nvmeq->cq_head; in nvme_process_cq()
736 phase = nvmeq->cq_phase; in nvme_process_cq()
741 struct nvme_completion cqe = nvmeq->cqes[head]; in nvme_process_cq()
744 nvmeq->sq_head = le16_to_cpu(cqe.sq_head); in nvme_process_cq()
745 if (++head == nvmeq->q_depth) { in nvme_process_cq()
750 ctx = free_cmdid(nvmeq, cqe.command_id, &fn); in nvme_process_cq()
751 fn(nvmeq->dev, ctx, &cqe); in nvme_process_cq()
760 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) in nvme_process_cq()
763 writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride)); in nvme_process_cq()
764 nvmeq->cq_head = head; in nvme_process_cq()
765 nvmeq->cq_phase = phase; in nvme_process_cq()
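
The nvme_process_cq() entries above walk the completion queue using the NVMe phase tag: bit 0 of the CQE status word, as the driver reads it, is inverted by the controller on every pass through the queue, so an entry is valid only while that bit matches the expected cq_phase. A condensed sketch, with the handler pointer type written out explicitly (the driver presumably uses a typedef):

/*
 * Distilled from lines 731-765 above; the phase test matches the one in
 * nvme_irq_check() (line 784). Illustration only.
 */
static irqreturn_t sketch_process_cq(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;
	u8 phase = nvmeq->cq_phase;

	for (;;) {
		void *ctx;
		void (*fn)(struct nvme_dev *, void *, struct nvme_completion *);
		struct nvme_completion cqe = nvmeq->cqes[head];

		if ((le16_to_cpu(cqe.status) & 1) != phase)
			break;					/* next entry not yet posted by the controller */

		nvmeq->sq_head = le16_to_cpu(cqe.sq_head);	/* controller reports how far it has read the SQ */
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;				/* phase flips on every wrap */
		}

		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);	/* release the id, fetch its context and handler */
		fn(nvmeq->dev, ctx, &cqe);
	}

	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return IRQ_NONE;				/* nothing was consumed */

	/* the CQ doorbell lives one stride above the SQ doorbell (line 763) */
	writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;
	return IRQ_HANDLED;
}
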
773 struct nvme_queue *nvmeq = data; in nvme_irq() local
774 spin_lock(&nvmeq->q_lock); in nvme_irq()
775 result = nvme_process_cq(nvmeq); in nvme_irq()
776 spin_unlock(&nvmeq->q_lock); in nvme_irq()
782 struct nvme_queue *nvmeq = data; in nvme_irq_check() local
783 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; in nvme_irq_check()
784 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase) in nvme_irq_check()
789 static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid) in nvme_abort_command() argument
791 spin_lock_irq(&nvmeq->q_lock); in nvme_abort_command()
792 cancel_cmdid(nvmeq, cmdid, NULL); in nvme_abort_command()
793 spin_unlock_irq(&nvmeq->q_lock); in nvme_abort_command()
815 int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, in nvme_submit_sync_cmd() argument
824 cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion, in nvme_submit_sync_cmd()
831 nvme_submit_cmd(nvmeq, cmd); in nvme_submit_sync_cmd()
835 nvme_abort_command(nvmeq, cmdid); in nvme_submit_sync_cmd()
867 struct nvme_queue *nvmeq) in adapter_alloc_cq() argument
875 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); in adapter_alloc_cq()
877 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
879 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); in adapter_alloc_cq()
888 struct nvme_queue *nvmeq) in adapter_alloc_sq() argument
896 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); in adapter_alloc_sq()
898 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
965 static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) in nvme_cancel_ios() argument
967 int depth = nvmeq->q_depth - 1; in nvme_cancel_ios()
968 struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); in nvme_cancel_ios()
972 for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { in nvme_cancel_ios()
983 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); in nvme_cancel_ios()
984 ctx = cancel_cmdid(nvmeq, cmdid, &fn); in nvme_cancel_ios()
985 fn(nvmeq->dev, ctx, &cqe); in nvme_cancel_ios()
989 static void nvme_free_queue_mem(struct nvme_queue *nvmeq) in nvme_free_queue_mem() argument
991 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), in nvme_free_queue_mem()
992 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue_mem()
993 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), in nvme_free_queue_mem()
994 nvmeq->sq_cmds, nvmeq->sq_dma_addr); in nvme_free_queue_mem()
995 kfree(nvmeq); in nvme_free_queue_mem()
1000 struct nvme_queue *nvmeq = dev->queues[qid]; in nvme_free_queue() local
1001 int vector = dev->entry[nvmeq->cq_vector].vector; in nvme_free_queue()
1003 spin_lock_irq(&nvmeq->q_lock); in nvme_free_queue()
1004 nvme_cancel_ios(nvmeq, false); in nvme_free_queue()
1005 while (bio_list_peek(&nvmeq->sq_cong)) { in nvme_free_queue()
1006 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); in nvme_free_queue()
1009 spin_unlock_irq(&nvmeq->q_lock); in nvme_free_queue()
1012 free_irq(vector, nvmeq); in nvme_free_queue()
1020 nvme_free_queue_mem(nvmeq); in nvme_free_queue()
1029 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); in nvme_alloc_queue() local
1030 if (!nvmeq) in nvme_alloc_queue()
1033 nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth), in nvme_alloc_queue()
1034 &nvmeq->cq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1035 if (!nvmeq->cqes) in nvme_alloc_queue()
1037 memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth)); in nvme_alloc_queue()
1039 nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth), in nvme_alloc_queue()
1040 &nvmeq->sq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1041 if (!nvmeq->sq_cmds) in nvme_alloc_queue()
1044 nvmeq->q_dmadev = dmadev; in nvme_alloc_queue()
1045 nvmeq->dev = dev; in nvme_alloc_queue()
1046 spin_lock_init(&nvmeq->q_lock); in nvme_alloc_queue()
1047 nvmeq->cq_head = 0; in nvme_alloc_queue()
1048 nvmeq->cq_phase = 1; in nvme_alloc_queue()
1049 init_waitqueue_head(&nvmeq->sq_full); in nvme_alloc_queue()
1050 init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); in nvme_alloc_queue()
1051 bio_list_init(&nvmeq->sq_cong); in nvme_alloc_queue()
1052 nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; in nvme_alloc_queue()
1053 nvmeq->q_depth = depth; in nvme_alloc_queue()
1054 nvmeq->cq_vector = vector; in nvme_alloc_queue()
1056 return nvmeq; in nvme_alloc_queue()
1059 dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes, in nvme_alloc_queue()
1060 nvmeq->cq_dma_addr); in nvme_alloc_queue()
1062 kfree(nvmeq); in nvme_alloc_queue()
1066 static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, in queue_request_irq() argument
1070 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, in queue_request_irq()
1073 name, nvmeq); in queue_request_irq()
1074 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, in queue_request_irq()
1075 IRQF_DISABLED | IRQF_SHARED, name, nvmeq); in queue_request_irq()
1082 struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector); in nvme_create_queue() local
1084 if (!nvmeq) in nvme_create_queue()
1087 result = adapter_alloc_cq(dev, qid, nvmeq); in nvme_create_queue()
1091 result = adapter_alloc_sq(dev, qid, nvmeq); in nvme_create_queue()
1095 result = queue_request_irq(dev, nvmeq, "nvme"); in nvme_create_queue()
1099 return nvmeq; in nvme_create_queue()
1106 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), in nvme_create_queue()
1107 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_create_queue()
1108 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), in nvme_create_queue()
1109 nvmeq->sq_cmds, nvmeq->sq_dma_addr); in nvme_create_queue()
1110 kfree(nvmeq); in nvme_create_queue()
1160 struct nvme_queue *nvmeq; in nvme_configure_admin_queue() local
1169 nvmeq = nvme_alloc_queue(dev, 0, 64, 0); in nvme_configure_admin_queue()
1170 if (!nvmeq) in nvme_configure_admin_queue()
1173 aqa = nvmeq->q_depth - 1; in nvme_configure_admin_queue()
1182 writeq(nvmeq->sq_dma_addr, &dev->bar->asq); in nvme_configure_admin_queue()
1183 writeq(nvmeq->cq_dma_addr, &dev->bar->acq); in nvme_configure_admin_queue()
1190 result = queue_request_irq(dev, nvmeq, "nvme admin"); in nvme_configure_admin_queue()
1194 dev->queues[0] = nvmeq; in nvme_configure_admin_queue()
1198 nvme_free_queue_mem(nvmeq); in nvme_configure_admin_queue()
1274 struct nvme_queue *nvmeq; in nvme_submit_io() local
1349 nvmeq = get_nvmeq(dev); in nvme_submit_io()
1356 put_nvmeq(nvmeq); in nvme_submit_io()
1360 status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); in nvme_submit_io()
1476 static void nvme_resubmit_bios(struct nvme_queue *nvmeq) in nvme_resubmit_bios() argument
1478 while (bio_list_peek(&nvmeq->sq_cong)) { in nvme_resubmit_bios()
1479 struct bio *bio = bio_list_pop(&nvmeq->sq_cong); in nvme_resubmit_bios()
1482 if (bio_list_empty(&nvmeq->sq_cong)) in nvme_resubmit_bios()
1483 remove_wait_queue(&nvmeq->sq_full, in nvme_resubmit_bios()
1484 &nvmeq->sq_cong_wait); in nvme_resubmit_bios()
1485 if (nvme_submit_bio_queue(nvmeq, ns, bio)) { in nvme_resubmit_bios()
1486 if (bio_list_empty(&nvmeq->sq_cong)) in nvme_resubmit_bios()
1487 add_wait_queue(&nvmeq->sq_full, in nvme_resubmit_bios()
1488 &nvmeq->sq_cong_wait); in nvme_resubmit_bios()
1489 bio_list_add_head(&nvmeq->sq_cong, bio); in nvme_resubmit_bios()
1505 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_kthread() local
1506 if (!nvmeq) in nvme_kthread()
1508 spin_lock_irq(&nvmeq->q_lock); in nvme_kthread()
1509 if (nvme_process_cq(nvmeq)) in nvme_kthread()
1511 nvme_cancel_ios(nvmeq, true); in nvme_kthread()
1512 nvme_resubmit_bios(nvmeq); in nvme_kthread()
1513 spin_unlock_irq(&nvmeq->q_lock); in nvme_kthread()
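
The nvme_make_request(), nvme_resubmit_bios() and nvme_kthread() entries show how congestion is handled: when a bio cannot be submitted, or other bios are already queued, it is parked on sq_cong and the queue's sq_cong_wait entry is hooked onto sq_full; freeing a command id wakes sq_full (line 210), and the polling kthread later calls nvme_resubmit_bios() under q_lock (lines 1505-1513) to retry the parked bios in order. A sketch of the submission side, with a simplified signature and assuming that a nonzero return from nvme_submit_bio_queue() means the bio was not submitted (the exact convention is not visible in the listing):

/*
 * Illustration of lines 715-728 above; not the driver's actual
 * make_request function.
 */
static void sketch_make_request(struct nvme_ns *ns, struct bio *bio)
{
	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);	/* pick an I/O queue; released below (lines 715, 728) */
	int result = -EBUSY;				/* assumed default: "not submitted yet" */

	spin_lock_irq(&nvmeq->q_lock);
	if (bio_list_empty(&nvmeq->sq_cong))		/* don't jump ahead of already-parked bios */
		result = nvme_submit_bio_queue(nvmeq, ns, bio);
	if (result) {
		if (bio_list_empty(&nvmeq->sq_cong))
			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
		bio_list_add(&nvmeq->sq_cong, bio);	/* parked until the kthread resubmits it */
	}
	spin_unlock_irq(&nvmeq->q_lock);
	put_nvmeq(nvmeq);
}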