Lines Matching +full:ctrl +full:- +full:len (all matches below are from the NVMe over TCP host driver, drivers/nvme/host/tcp.c)
1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
25 * A non-zero value being sufficient to indicate general consideration of any
35 * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
36 * because dependencies are tracked for both nvme-tcp and user contexts. Using
37 * a separate class prevents lockdep from conflating nvme-tcp socket use with
38 * user-space socket API use.
45 struct sock *sk = sock->sk; in nvme_tcp_reclassify_socket()
50 switch (sk->sk_family) { in nvme_tcp_reclassify_socket()
52 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME", in nvme_tcp_reclassify_socket()
54 "sk_lock-AF_INET-NVME", in nvme_tcp_reclassify_socket()
58 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME", in nvme_tcp_reclassify_socket()
60 "sk_lock-AF_INET6-NVME", in nvme_tcp_reclassify_socket()
135 struct nvme_tcp_ctrl *ctrl; member
163 struct nvme_ctrl ctrl; member
178 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) in to_tcp_ctrl() argument
180 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); in to_tcp_ctrl()
185 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
193 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
194 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
199 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
204 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
209 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
214 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
226 return rq_data_dir(rq) == WRITE && req->data_len && in nvme_tcp_has_inline_data()
227 req->data_len <= nvme_tcp_inline_data_size(req->queue); in nvme_tcp_has_inline_data()
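nvme_tcp_inline_data_size() and nvme_tcp_has_inline_data() decide whether a write carries its payload inside the command capsule: the capsule is ioccsz * 16 bytes (see the nvme_tcp_alloc_queue() match at line 1446), and whatever exceeds the 64-byte struct nvme_command is available for inline data. A hedged worked example:

/* Illustrative only: a controller advertising ioccsz = 8 has a
 * 128-byte capsule, so writes of up to 128 - 64 = 64 bytes travel
 * in-capsule and need no separate H2C data PDU.
 */
static inline int inline_data_size(u32 ioccsz)
{
	return ioccsz * 16 - sizeof(struct nvme_command);	/* 64 bytes */
}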
232 return req->iter.bvec->bv_page; in nvme_tcp_req_cur_page()
237 return req->iter.bvec->bv_offset + req->iter.iov_offset; in nvme_tcp_req_cur_offset()
242 return min_t(size_t, iov_iter_single_seg_count(&req->iter), in nvme_tcp_req_cur_length()
243 req->pdu_len - req->pdu_sent); in nvme_tcp_req_cur_length()
248 return req->iter.iov_offset; in nvme_tcp_req_offset()
254 req->pdu_len - req->pdu_sent : 0; in nvme_tcp_pdu_data_left()
258 int len) in nvme_tcp_pdu_last_send() argument
260 return nvme_tcp_pdu_data_left(req) <= len; in nvme_tcp_pdu_last_send()
272 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { in nvme_tcp_init_iter()
273 vec = &rq->special_vec; in nvme_tcp_init_iter()
278 struct bio *bio = req->curr_bio; in nvme_tcp_init_iter()
280 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in nvme_tcp_init_iter()
282 size = bio->bi_iter.bi_size; in nvme_tcp_init_iter()
283 offset = bio->bi_iter.bi_bvec_done; in nvme_tcp_init_iter()
286 iov_iter_bvec(&req->iter, dir, vec, nsegs, size); in nvme_tcp_init_iter()
287 req->iter.iov_offset = offset; in nvme_tcp_init_iter()
291 int len) in nvme_tcp_advance_req() argument
293 req->data_sent += len; in nvme_tcp_advance_req()
294 req->pdu_sent += len; in nvme_tcp_advance_req()
295 iov_iter_advance(&req->iter, len); in nvme_tcp_advance_req()
296 if (!iov_iter_count(&req->iter) && in nvme_tcp_advance_req()
297 req->data_sent < req->data_len) { in nvme_tcp_advance_req()
298 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_advance_req()
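nvme_tcp_init_iter() and nvme_tcp_advance_req() implement the data-path iterator: an ITER_BVEC iov_iter points straight at the request's bio_vec array (or at the single special_vec for RQF_SPECIAL_PAYLOAD requests), each transferred chunk advances it, and when one bio drains while the request still owes data, the iterator is re-seeded from the next bio in the chain. Reconstructed from the matched lines (direction WRITE for the send path):

static void nvme_tcp_advance_req(struct nvme_tcp_request *req, int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	/* current bio exhausted but more request data still owed? */
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}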
315 return !list_empty(&queue->send_list) || in nvme_tcp_queue_more()
316 !llist_empty(&queue->req_list); in nvme_tcp_queue_more()
322 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request()
325 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
326 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
333 if (queue->io_cpu == raw_smp_processor_id() && in nvme_tcp_queue_request()
334 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
336 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
340 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
348 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
350 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
359 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
363 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
369 list_del(&req->entry); in nvme_tcp_fetch_request()
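nvme_tcp_queue_request(), nvme_tcp_process_req_list() and nvme_tcp_fetch_request() form a two-stage queue: producers push onto a lock-free llist from any context, and the consumer splices it into an ordinary list under send_mutex. llist_del_all() returns entries newest-first, so re-adding each node at the head of send_list restores FIFO order. A sketch of both halves, condensed from the matched code:

/* Producer: lock-free push, safe from completion and softirq context. */
static void queue_request(struct nvme_tcp_queue *queue,
			  struct nvme_tcp_request *req)
{
	llist_add(&req->lentry, &queue->req_list);
}

/* Consumer: runs with queue->send_mutex held. The LIFO chain from
 * llist_del_all() is re-reversed by head insertion, leaving
 * send_list oldest-first.
 */
static void process_req_list(struct nvme_tcp_queue *queue)
{
	struct nvme_tcp_request *req;
	struct llist_node *node;

	for (node = llist_del_all(&queue->req_list); node; node = node->next) {
		req = llist_entry(node, struct nvme_tcp_request, lentry);
		list_add(&req->entry, &queue->send_list);
	}
}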
381 struct page *page, off_t off, size_t len) in nvme_tcp_ddgst_update() argument
386 sg_set_page(&sg, page, len, off); in nvme_tcp_ddgst_update()
387 ahash_request_set_crypt(hash, &sg, NULL, len); in nvme_tcp_ddgst_update()
392 void *pdu, size_t len) in nvme_tcp_hdgst() argument
396 sg_init_one(&sg, pdu, len); in nvme_tcp_hdgst()
397 ahash_request_set_crypt(hash, &sg, pdu + len, len); in nvme_tcp_hdgst()
408 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { in nvme_tcp_verify_hdgst()
409 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
412 return -EPROTO; in nvme_tcp_verify_hdgst()
415 recv_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
416 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
417 exp_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
419 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
422 return -EIO; in nvme_tcp_verify_hdgst()
432 u32 len; in nvme_tcp_check_ddgst() local
434 len = le32_to_cpu(hdr->plen) - hdr->hlen - in nvme_tcp_check_ddgst()
435 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0); in nvme_tcp_check_ddgst()
437 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { in nvme_tcp_check_ddgst()
438 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
441 return -EPROTO; in nvme_tcp_check_ddgst()
443 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
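All NVMe/TCP digests are CRC32C computed through the crypto ahash API. nvme_tcp_hdgst() hashes the PDU header and writes the 4-byte digest directly behind it (at pdu + len); nvme_tcp_verify_hdgst() exploits that layout by saving the received digest, recomputing in place, and comparing the two. A sketch of the computation, assuming an already-allocated "crc32c" ahash request:

static void tcp_hdgst(struct ahash_request *hash, void *pdu, size_t len)
{
	struct scatterlist sg;

	/* hash 'len' header bytes, emit the digest right after them */
	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}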
453 page_frag_free(req->pdu); in nvme_tcp_exit_request()
460 struct nvme_tcp_ctrl *ctrl = set->driver_data; in nvme_tcp_init_request() local
462 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_tcp_init_request()
463 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request()
466 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
469 if (!req->pdu) in nvme_tcp_init_request()
470 return -ENOMEM; in nvme_tcp_init_request()
472 req->queue = queue; in nvme_tcp_init_request()
473 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_tcp_init_request()
481 struct nvme_tcp_ctrl *ctrl = data; in nvme_tcp_init_hctx() local
482 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx()
484 hctx->driver_data = queue; in nvme_tcp_init_hctx()
491 struct nvme_tcp_ctrl *ctrl = data; in nvme_tcp_init_admin_hctx() local
492 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx()
494 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
501 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
502 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
508 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
510 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
511 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
512 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
515 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) in nvme_tcp_error_recovery() argument
517 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_tcp_error_recovery()
520 dev_warn(ctrl->device, "starting error recovery\n"); in nvme_tcp_error_recovery()
521 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_error_recovery()
529 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
531 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
533 cqe->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_process_nvme_cqe()
534 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
535 return -EINVAL; in nvme_tcp_process_nvme_cqe()
538 if (!nvme_try_complete_req(rq, cqe->status, cqe->result)) in nvme_tcp_process_nvme_cqe()
540 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
550 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
552 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
554 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_c2h_data()
555 return -ENOENT; in nvme_tcp_handle_c2h_data()
559 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
561 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
562 return -EIO; in nvme_tcp_handle_c2h_data()
565 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
567 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && in nvme_tcp_handle_c2h_data()
568 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { in nvme_tcp_handle_c2h_data()
569 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
571 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
572 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
573 return -EPROTO; in nvme_tcp_handle_c2h_data()
582 struct nvme_completion *cqe = &pdu->cqe; in nvme_tcp_handle_comp()
592 cqe->command_id))) in nvme_tcp_handle_comp()
593 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
594 &cqe->result); in nvme_tcp_handle_comp()
604 struct nvme_tcp_data_pdu *data = req->pdu; in nvme_tcp_setup_h2c_data_pdu()
605 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu()
610 req->pdu_len = le32_to_cpu(pdu->r2t_length); in nvme_tcp_setup_h2c_data_pdu()
611 req->pdu_sent = 0; in nvme_tcp_setup_h2c_data_pdu()
613 if (unlikely(!req->pdu_len)) { in nvme_tcp_setup_h2c_data_pdu()
614 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
615 "req %d r2t len is %u, probably a bug...\n", in nvme_tcp_setup_h2c_data_pdu()
616 rq->tag, req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
617 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
620 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { in nvme_tcp_setup_h2c_data_pdu()
621 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
622 "req %d r2t len %u exceeded data len %u (%zu sent)\n", in nvme_tcp_setup_h2c_data_pdu()
623 rq->tag, req->pdu_len, req->data_len, in nvme_tcp_setup_h2c_data_pdu()
624 req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
625 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
628 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { in nvme_tcp_setup_h2c_data_pdu()
629 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
631 rq->tag, le32_to_cpu(pdu->r2t_offset), in nvme_tcp_setup_h2c_data_pdu()
632 req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
633 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
637 data->hdr.type = nvme_tcp_h2c_data; in nvme_tcp_setup_h2c_data_pdu()
638 data->hdr.flags = NVME_TCP_F_DATA_LAST; in nvme_tcp_setup_h2c_data_pdu()
639 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
640 data->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_h2c_data_pdu()
641 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
642 data->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_h2c_data_pdu()
643 data->hdr.hlen = sizeof(*data); in nvme_tcp_setup_h2c_data_pdu()
644 data->hdr.pdo = data->hdr.hlen + hdgst; in nvme_tcp_setup_h2c_data_pdu()
645 data->hdr.plen = in nvme_tcp_setup_h2c_data_pdu()
646 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_h2c_data_pdu()
647 data->ttag = pdu->ttag; in nvme_tcp_setup_h2c_data_pdu()
648 data->command_id = nvme_cid(rq); in nvme_tcp_setup_h2c_data_pdu()
649 data->data_offset = pdu->r2t_offset; in nvme_tcp_setup_h2c_data_pdu()
650 data->data_length = cpu_to_le32(req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
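nvme_tcp_setup_h2c_data_pdu() answers a controller R2T: after the three sanity checks on r2t_length and r2t_offset, it fills the H2C data PDU header. The length fields must account for the optional digests; a sketch of the math (hdgst/ddgst are 4 bytes each when negotiated, else 0):

static void fill_h2c_hdr(struct nvme_tcp_data_pdu *data,
			 struct nvme_tcp_request *req,
			 u8 hdgst, u8 ddgst)
{
	data->hdr.hlen = sizeof(*data);
	/* pdo: payload starts right after the header and its digest */
	data->hdr.pdo = data->hdr.hlen + hdgst;
	/* plen: total on-wire bytes = header + hdgst + payload + ddgst */
	data->hdr.plen = cpu_to_le32(data->hdr.hlen + hdgst +
				     req->pdu_len + ddgst);
}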
661 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
663 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
665 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_r2t()
666 return -ENOENT; in nvme_tcp_handle_r2t()
674 req->state = NVME_TCP_SEND_H2C_PDU; in nvme_tcp_handle_r2t()
675 req->offset = 0; in nvme_tcp_handle_r2t()
683 unsigned int *offset, size_t *len) in nvme_tcp_recv_pdu() argument
686 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
687 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
691 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
695 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
696 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
698 *len -= rcv_len; in nvme_tcp_recv_pdu()
699 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
702 hdr = queue->pdu; in nvme_tcp_recv_pdu()
703 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
704 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
710 if (queue->data_digest) { in nvme_tcp_recv_pdu()
711 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
716 switch (hdr->type) { in nvme_tcp_recv_pdu()
718 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
721 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
724 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
726 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
727 "unsupported pdu type (%d)\n", hdr->type); in nvme_tcp_recv_pdu()
728 return -EINVAL; in nvme_tcp_recv_pdu()
741 unsigned int *offset, size_t *len) in nvme_tcp_recv_data() argument
743 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
745 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
751 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
755 if (!iov_iter_count(&req->iter)) { in nvme_tcp_recv_data()
756 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_recv_data()
762 if (!req->curr_bio) { in nvme_tcp_recv_data()
763 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
765 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
767 return -EIO; in nvme_tcp_recv_data()
774 iov_iter_count(&req->iter)); in nvme_tcp_recv_data()
776 if (queue->data_digest) in nvme_tcp_recv_data()
778 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
781 &req->iter, recv_len); in nvme_tcp_recv_data()
783 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
785 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
789 *len -= recv_len; in nvme_tcp_recv_data()
791 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
794 if (!queue->data_remaining) { in nvme_tcp_recv_data()
795 if (queue->data_digest) { in nvme_tcp_recv_data()
796 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
797 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
799 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_data()
801 queue->nr_cqe++; in nvme_tcp_recv_data()
811 struct sk_buff *skb, unsigned int *offset, size_t *len) in nvme_tcp_recv_ddgst() argument
813 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
814 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
815 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
816 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
823 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
825 *len -= recv_len; in nvme_tcp_recv_ddgst()
826 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
829 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
830 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
832 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
833 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
834 return -EIO; in nvme_tcp_recv_ddgst()
837 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_ddgst()
839 pdu->command_id); in nvme_tcp_recv_ddgst()
842 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
850 unsigned int offset, size_t len) in nvme_tcp_recv_skb() argument
852 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb()
853 size_t consumed = len; in nvme_tcp_recv_skb()
856 while (len) { in nvme_tcp_recv_skb()
859 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
862 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
865 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
868 result = -EFAULT; in nvme_tcp_recv_skb()
871 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
873 queue->rd_enabled = false; in nvme_tcp_recv_skb()
874 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
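nvme_tcp_recv_skb() is the tcp read_sock() callback that drives the receive state machine. The state is derived from whichever counter is still non-zero (nvme_tcp_recv_state(): pdu_remaining first, then ddgst_remaining, else data), and each helper consumes what its state allows before the loop re-evaluates. Condensed from the matched lines:

static int recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
		    unsigned int offset, size_t len)
{
	struct nvme_tcp_queue *queue = desc->arg.data;
	size_t consumed = len;
	int result;

	while (len) {
		switch (nvme_tcp_recv_state(queue)) {
		case NVME_TCP_RECV_PDU:
			result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DATA:
			result = nvme_tcp_recv_data(queue, skb, &offset, &len);
			break;
		case NVME_TCP_RECV_DDGST:
			result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
			break;
		default:
			result = -EFAULT;
		}
		if (result) {
			/* fatal on this connection: stop reads, recover */
			queue->rd_enabled = false;
			nvme_tcp_error_recovery(&queue->ctrl->ctrl);
			return result;
		}
	}
	return consumed;
}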
886 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
887 queue = sk->sk_user_data; in nvme_tcp_data_ready()
888 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
889 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
890 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
891 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
898 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
899 queue = sk->sk_user_data; in nvme_tcp_write_space()
901 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in nvme_tcp_write_space()
902 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
904 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
911 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
912 queue = sk->sk_user_data; in nvme_tcp_state_change()
916 switch (sk->sk_state) { in nvme_tcp_state_change()
922 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
925 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
927 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
930 queue->state_change(sk); in nvme_tcp_state_change()
932 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
937 queue->request = NULL; in nvme_tcp_done_send_req()
945 nvme_complete_async_event(&req->queue->ctrl->ctrl, in nvme_tcp_fail_request()
955 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data()
956 int req_data_len = req->data_len; in nvme_tcp_try_send_data()
961 size_t len = nvme_tcp_req_cur_length(req); in nvme_tcp_try_send_data() local
962 bool last = nvme_tcp_pdu_last_send(req, len); in nvme_tcp_try_send_data()
963 int req_data_sent = req->data_sent; in nvme_tcp_try_send_data()
966 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
972 ret = kernel_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
975 ret = sock_no_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
981 if (queue->data_digest) in nvme_tcp_try_send_data()
982 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
994 if (last && ret == len) { in nvme_tcp_try_send_data()
995 if (queue->data_digest) { in nvme_tcp_try_send_data()
996 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
997 &req->ddgst); in nvme_tcp_try_send_data()
998 req->state = NVME_TCP_SEND_DDGST; in nvme_tcp_try_send_data()
999 req->offset = 0; in nvme_tcp_try_send_data()
1006 return -EAGAIN; in nvme_tcp_try_send_data()
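nvme_tcp_try_send_data() streams bvec pages with zero-copy kernel_sendpage() where that is safe: sendpage_ok() rejects pages whose reference counting cannot outlive the TCP stack holding them (slab pages, for instance), and the driver then falls back to the copying sock_no_sendpage(). MSG_MORE | MSG_SENDPAGE_NOTLAST keeps the stack from flushing early; MSG_EOR marks the end. A simplified sketch (the real driver also withholds MSG_EOR when a data digest or more queued requests follow):

static int send_one_segment(struct socket *sock, struct page *page,
			    size_t offset, size_t len, bool last)
{
	int flags = MSG_DONTWAIT;

	if (last)
		flags |= MSG_EOR;
	else
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

	/* zero-copy only when the page's refcounting allows it */
	if (sendpage_ok(page))
		return kernel_sendpage(sock, page, offset, len, flags);
	return sock_no_sendpage(sock, page, offset, len, flags);
}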
1011 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu()
1012 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_try_send_cmd_pdu()
1015 int len = sizeof(*pdu) + hdgst - req->offset; in nvme_tcp_try_send_cmd_pdu() local
1024 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
1025 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
1027 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_cmd_pdu()
1028 offset_in_page(pdu) + req->offset, len, flags); in nvme_tcp_try_send_cmd_pdu()
1032 len -= ret; in nvme_tcp_try_send_cmd_pdu()
1033 if (!len) { in nvme_tcp_try_send_cmd_pdu()
1035 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_cmd_pdu()
1036 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
1037 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
1044 req->offset += ret; in nvme_tcp_try_send_cmd_pdu()
1046 return -EAGAIN; in nvme_tcp_try_send_cmd_pdu()
1051 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu()
1052 struct nvme_tcp_data_pdu *pdu = req->pdu; in nvme_tcp_try_send_data_pdu()
1054 int len = sizeof(*pdu) - req->offset + hdgst; in nvme_tcp_try_send_data_pdu() local
1057 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
1058 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
1060 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_data_pdu()
1061 offset_in_page(pdu) + req->offset, len, in nvme_tcp_try_send_data_pdu()
1066 len -= ret; in nvme_tcp_try_send_data_pdu()
1067 if (!len) { in nvme_tcp_try_send_data_pdu()
1068 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_data_pdu()
1069 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1070 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1071 if (!req->data_sent) in nvme_tcp_try_send_data_pdu()
1075 req->offset += ret; in nvme_tcp_try_send_data_pdu()
1077 return -EAGAIN; in nvme_tcp_try_send_data_pdu()
1082 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst()
1083 size_t offset = req->offset; in nvme_tcp_try_send_ddgst()
1087 .iov_base = (u8 *)&req->ddgst + req->offset, in nvme_tcp_try_send_ddgst()
1088 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset in nvme_tcp_try_send_ddgst()
1096 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1105 req->offset += ret; in nvme_tcp_try_send_ddgst()
1106 return -EAGAIN; in nvme_tcp_try_send_ddgst()
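The 4-byte data digest is too small for sendpage, so nvme_tcp_try_send_ddgst() uses kernel_sendmsg() with a single kvec, resuming mid-digest via req->offset after a short write. A sketch:

static int send_ddgst(struct socket *sock, __le32 *ddgst, size_t offset,
		      bool more_queued)
{
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT |
			     (more_queued ? MSG_MORE : MSG_EOR),
	};
	struct kvec iov = {
		.iov_base = (u8 *)ddgst + offset,	/* resume point */
		.iov_len  = NVME_TCP_DIGEST_LENGTH - offset,
	};

	return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
}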
1114 if (!queue->request) { in nvme_tcp_try_send()
1115 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1116 if (!queue->request) in nvme_tcp_try_send()
1119 req = queue->request; in nvme_tcp_try_send()
1121 if (req->state == NVME_TCP_SEND_CMD_PDU) { in nvme_tcp_try_send()
1129 if (req->state == NVME_TCP_SEND_H2C_PDU) { in nvme_tcp_try_send()
1135 if (req->state == NVME_TCP_SEND_DATA) { in nvme_tcp_try_send()
1141 if (req->state == NVME_TCP_SEND_DDGST) in nvme_tcp_try_send()
1144 if (ret == -EAGAIN) { in nvme_tcp_try_send()
1147 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1149 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1157 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1158 struct sock *sk = sock->sk; in nvme_tcp_try_recv()
1165 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1166 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); in nvme_tcp_try_recv()
1181 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1183 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1196 if (!pending || !queue->rd_enabled) in nvme_tcp_io_work()
1201 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1206 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1208 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1209 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1221 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1222 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1224 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1226 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1227 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1229 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1233 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1236 return -ENOMEM; in nvme_tcp_alloc_crypto()
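nvme_tcp_alloc_crypto() pairs one "crc32c" transform with two ahash requests, one per direction, so send and receive digests run independently; nvme_tcp_free_crypto() recovers the tfm via crypto_ahash_reqtfm() before freeing. A sketch with the unwind order implied by the matches:

static int alloc_crypto(struct nvme_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}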
1239 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_free_async_req() argument
1241 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_free_async_req()
1243 page_frag_free(async->pdu); in nvme_tcp_free_async_req()
1246 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_alloc_async_req() argument
1248 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1249 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_alloc_async_req()
1252 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1255 if (!async->pdu) in nvme_tcp_alloc_async_req()
1256 return -ENOMEM; in nvme_tcp_alloc_async_req()
1258 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1264 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_queue() local
1265 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue()
1267 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1270 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1273 sock_release(queue->sock); in nvme_tcp_free_queue()
1274 kfree(queue->pdu); in nvme_tcp_free_queue()
1275 mutex_destroy(&queue->queue_lock); in nvme_tcp_free_queue()
1289 return -ENOMEM; in nvme_tcp_init_connection()
1293 ret = -ENOMEM; in nvme_tcp_init_connection()
1297 icreq->hdr.type = nvme_tcp_icreq; in nvme_tcp_init_connection()
1298 icreq->hdr.hlen = sizeof(*icreq); in nvme_tcp_init_connection()
1299 icreq->hdr.pdo = 0; in nvme_tcp_init_connection()
1300 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); in nvme_tcp_init_connection()
1301 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); in nvme_tcp_init_connection()
1302 icreq->maxr2t = 0; /* single inflight r2t supported */ in nvme_tcp_init_connection()
1303 icreq->hpda = 0; /* no alignment constraint */ in nvme_tcp_init_connection()
1304 if (queue->hdr_digest) in nvme_tcp_init_connection()
1305 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; in nvme_tcp_init_connection()
1306 if (queue->data_digest) in nvme_tcp_init_connection()
1307 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; in nvme_tcp_init_connection()
1311 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1318 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1323 ret = -EINVAL; in nvme_tcp_init_connection()
1324 if (icresp->hdr.type != nvme_tcp_icresp) { in nvme_tcp_init_connection()
1326 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1330 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { in nvme_tcp_init_connection()
1332 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1336 if (icresp->pfv != NVME_TCP_PFV_1_0) { in nvme_tcp_init_connection()
1338 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1342 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvme_tcp_init_connection()
1343 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1344 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1345 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1347 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1352 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvme_tcp_init_connection()
1353 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1354 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1355 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1357 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1362 if (icresp->cpda != 0) { in nvme_tcp_init_connection()
1364 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
1383 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue() local
1387 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_default_queue()
1392 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue() local
1397 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_read_queue()
1398 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_read_queue()
1403 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue() local
1409 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_poll_queue()
1410 ctrl->io_queues[HCTX_TYPE_READ] + in nvme_tcp_poll_queue()
1411 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_tcp_poll_queue()
1416 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu() local
1421 n = qid - 1; in nvme_tcp_set_queue_io_cpu()
1423 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; in nvme_tcp_set_queue_io_cpu()
1425 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - in nvme_tcp_set_queue_io_cpu()
1426 ctrl->io_queues[HCTX_TYPE_READ] - 1; in nvme_tcp_set_queue_io_cpu()
1427 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_set_queue_io_cpu()
1433 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_queue() local
1434 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue()
1437 mutex_init(&queue->queue_lock); in nvme_tcp_alloc_queue()
1438 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1439 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1440 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1441 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1442 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1443 queue->queue_size = queue_size; in nvme_tcp_alloc_queue()
1446 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1448 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1451 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, in nvme_tcp_alloc_queue()
1452 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1454 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1459 nvme_tcp_reclassify_socket(queue->sock); in nvme_tcp_alloc_queue()
1462 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1465 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1472 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1475 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1478 if (nctrl->opts->tos >= 0) in nvme_tcp_alloc_queue()
1479 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1482 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1484 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1486 queue->request = NULL; in nvme_tcp_alloc_queue()
1487 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1488 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1489 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1490 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1491 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1493 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_alloc_queue()
1494 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1495 sizeof(ctrl->src_addr)); in nvme_tcp_alloc_queue()
1497 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1504 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1505 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1506 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1509 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1517 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1518 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1519 ret = -ENOMEM; in nvme_tcp_alloc_queue()
1523 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1526 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1527 sizeof(ctrl->addr), 0); in nvme_tcp_alloc_queue()
1529 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1538 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1543 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1545 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1547 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1550 sock_release(queue->sock); in nvme_tcp_alloc_queue()
1551 queue->sock = NULL; in nvme_tcp_alloc_queue()
1553 mutex_destroy(&queue->queue_lock); in nvme_tcp_alloc_queue()
1559 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_ops()
1561 write_lock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_ops()
1562 sock->sk->sk_user_data = NULL; in nvme_tcp_restore_sock_ops()
1563 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_ops()
1564 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_ops()
1565 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_ops()
1566 write_unlock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_ops()
1571 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1573 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1578 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_stop_queue() local
1579 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue()
1581 mutex_lock(&queue->queue_lock); in nvme_tcp_stop_queue()
1582 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue()
1584 mutex_unlock(&queue->queue_lock); in nvme_tcp_stop_queue()
1589 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1590 queue->sock->sk->sk_user_data = queue; in nvme_tcp_setup_sock_ops()
1591 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_setup_sock_ops()
1592 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_setup_sock_ops()
1593 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_setup_sock_ops()
1594 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_setup_sock_ops()
1595 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_setup_sock_ops()
1596 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_setup_sock_ops()
1598 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_setup_sock_ops()
1600 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1605 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_start_queue() local
1606 struct nvme_tcp_queue *queue = &ctrl->queues[idx]; in nvme_tcp_start_queue()
1609 queue->rd_enabled = true; in nvme_tcp_start_queue()
1619 set_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_start_queue()
1621 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_start_queue()
1623 dev_err(nctrl->device, in nvme_tcp_start_queue()
1632 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_tagset() local
1637 set = &ctrl->admin_tag_set; in nvme_tcp_alloc_tagset()
1639 set->ops = &nvme_tcp_admin_mq_ops; in nvme_tcp_alloc_tagset()
1640 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_tcp_alloc_tagset()
1641 set->reserved_tags = 2; /* connect + keep-alive */ in nvme_tcp_alloc_tagset()
1642 set->numa_node = nctrl->numa_node; in nvme_tcp_alloc_tagset()
1643 set->flags = BLK_MQ_F_BLOCKING; in nvme_tcp_alloc_tagset()
1644 set->cmd_size = sizeof(struct nvme_tcp_request); in nvme_tcp_alloc_tagset()
1645 set->driver_data = ctrl; in nvme_tcp_alloc_tagset()
1646 set->nr_hw_queues = 1; in nvme_tcp_alloc_tagset()
1647 set->timeout = ADMIN_TIMEOUT; in nvme_tcp_alloc_tagset()
1649 set = &ctrl->tag_set; in nvme_tcp_alloc_tagset()
1651 set->ops = &nvme_tcp_mq_ops; in nvme_tcp_alloc_tagset()
1652 set->queue_depth = nctrl->sqsize + 1; in nvme_tcp_alloc_tagset()
1653 set->reserved_tags = 1; /* fabric connect */ in nvme_tcp_alloc_tagset()
1654 set->numa_node = nctrl->numa_node; in nvme_tcp_alloc_tagset()
1655 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; in nvme_tcp_alloc_tagset()
1656 set->cmd_size = sizeof(struct nvme_tcp_request); in nvme_tcp_alloc_tagset()
1657 set->driver_data = ctrl; in nvme_tcp_alloc_tagset()
1658 set->nr_hw_queues = nctrl->queue_count - 1; in nvme_tcp_alloc_tagset()
1659 set->timeout = NVME_IO_TIMEOUT; in nvme_tcp_alloc_tagset()
1660 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; in nvme_tcp_alloc_tagset()
1670 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_free_admin_queue() argument
1672 if (to_tcp_ctrl(ctrl)->async_req.pdu) { in nvme_tcp_free_admin_queue()
1673 cancel_work_sync(&ctrl->async_event_work); in nvme_tcp_free_admin_queue()
1674 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_free_admin_queue()
1675 to_tcp_ctrl(ctrl)->async_req.pdu = NULL; in nvme_tcp_free_admin_queue()
1678 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_free_admin_queue()
1681 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_free_io_queues() argument
1685 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_free_io_queues()
1686 nvme_tcp_free_queue(ctrl, i); in nvme_tcp_free_io_queues()
1689 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_stop_io_queues() argument
1693 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_stop_io_queues()
1694 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_stop_io_queues()
1697 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_start_io_queues() argument
1701 for (i = 1; i < ctrl->queue_count; i++) { in nvme_tcp_start_io_queues()
1702 ret = nvme_tcp_start_queue(ctrl, i); in nvme_tcp_start_io_queues()
1710 for (i--; i >= 1; i--) in nvme_tcp_start_io_queues()
1711 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_start_io_queues()
1715 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_admin_queue() argument
1719 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); in nvme_tcp_alloc_admin_queue()
1723 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_alloc_admin_queue()
1730 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_alloc_admin_queue()
1734 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in __nvme_tcp_alloc_io_queues() argument
1738 for (i = 1; i < ctrl->queue_count; i++) { in __nvme_tcp_alloc_io_queues()
1739 ret = nvme_tcp_alloc_queue(ctrl, i, in __nvme_tcp_alloc_io_queues()
1740 ctrl->sqsize + 1); in __nvme_tcp_alloc_io_queues()
1748 for (i--; i >= 1; i--) in __nvme_tcp_alloc_io_queues()
1749 nvme_tcp_free_queue(ctrl, i); in __nvme_tcp_alloc_io_queues()
1754 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_nr_io_queues() argument
1758 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1759 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1760 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1768 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_set_io_queues() local
1769 struct nvmf_ctrl_options *opts = nctrl->opts; in nvme_tcp_set_io_queues()
1771 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { in nvme_tcp_set_io_queues()
1777 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; in nvme_tcp_set_io_queues()
1778 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_set_io_queues()
1779 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_tcp_set_io_queues()
1780 min(opts->nr_write_queues, nr_io_queues); in nvme_tcp_set_io_queues()
1781 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_set_io_queues()
1788 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_tcp_set_io_queues()
1789 min(opts->nr_io_queues, nr_io_queues); in nvme_tcp_set_io_queues()
1790 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_set_io_queues()
1793 if (opts->nr_poll_queues && nr_io_queues) { in nvme_tcp_set_io_queues()
1795 ctrl->io_queues[HCTX_TYPE_POLL] = in nvme_tcp_set_io_queues()
1796 min(opts->nr_poll_queues, nr_io_queues); in nvme_tcp_set_io_queues()
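nvme_tcp_set_io_queues() distributes the granted queue count in priority order: dedicated read queues first (when separate write queues were requested), then default/write queues, then polling queues take the remainder. A worked example with illustrative option values:

/* Illustrative only, not taken from the listing:
 *   opts->nr_io_queues    = 4    (reads)
 *   opts->nr_write_queues = 2
 *   opts->nr_poll_queues  = 2
 *   granted nr_io_queues  = 8
 *
 *   HCTX_TYPE_READ    = 4             -> 4 remain
 *   HCTX_TYPE_DEFAULT = min(2, 4) = 2 -> 2 remain
 *   HCTX_TYPE_POLL    = min(2, 2) = 2 -> 0 remain
 *
 * Without nr_write_queues, reads and writes share HCTX_TYPE_DEFAULT
 * and only the poll carve-out remains.
 */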
1800 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_io_queues() argument
1805 nr_io_queues = nvme_tcp_nr_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1806 ret = nvme_set_queue_count(ctrl, &nr_io_queues); in nvme_tcp_alloc_io_queues()
1811 dev_err(ctrl->device, in nvme_tcp_alloc_io_queues()
1813 return -ENOMEM; in nvme_tcp_alloc_io_queues()
1816 ctrl->queue_count = nr_io_queues + 1; in nvme_tcp_alloc_io_queues()
1817 dev_info(ctrl->device, in nvme_tcp_alloc_io_queues()
1820 nvme_tcp_set_io_queues(ctrl, nr_io_queues); in nvme_tcp_alloc_io_queues()
1822 return __nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1825 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_io_queues() argument
1827 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1829 blk_cleanup_queue(ctrl->connect_q); in nvme_tcp_destroy_io_queues()
1830 blk_mq_free_tag_set(ctrl->tagset); in nvme_tcp_destroy_io_queues()
1832 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1835 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_io_queues() argument
1839 ret = nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1844 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false); in nvme_tcp_configure_io_queues()
1845 if (IS_ERR(ctrl->tagset)) { in nvme_tcp_configure_io_queues()
1846 ret = PTR_ERR(ctrl->tagset); in nvme_tcp_configure_io_queues()
1850 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); in nvme_tcp_configure_io_queues()
1851 if (IS_ERR(ctrl->connect_q)) { in nvme_tcp_configure_io_queues()
1852 ret = PTR_ERR(ctrl->connect_q); in nvme_tcp_configure_io_queues()
1857 ret = nvme_tcp_start_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1862 nvme_start_freeze(ctrl); in nvme_tcp_configure_io_queues()
1863 nvme_start_queues(ctrl); in nvme_tcp_configure_io_queues()
1864 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { in nvme_tcp_configure_io_queues()
1870 ret = -ENODEV; in nvme_tcp_configure_io_queues()
1871 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
1874 blk_mq_update_nr_hw_queues(ctrl->tagset, in nvme_tcp_configure_io_queues()
1875 ctrl->queue_count - 1); in nvme_tcp_configure_io_queues()
1876 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
1882 nvme_stop_queues(ctrl); in nvme_tcp_configure_io_queues()
1883 nvme_sync_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1884 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1886 nvme_cancel_tagset(ctrl); in nvme_tcp_configure_io_queues()
1888 blk_cleanup_queue(ctrl->connect_q); in nvme_tcp_configure_io_queues()
1891 blk_mq_free_tag_set(ctrl->tagset); in nvme_tcp_configure_io_queues()
1893 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1897 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_admin_queue() argument
1899 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_destroy_admin_queue()
1901 blk_cleanup_queue(ctrl->admin_q); in nvme_tcp_destroy_admin_queue()
1902 blk_cleanup_queue(ctrl->fabrics_q); in nvme_tcp_destroy_admin_queue()
1903 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_tcp_destroy_admin_queue()
1905 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_destroy_admin_queue()
1908 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_admin_queue() argument
1912 error = nvme_tcp_alloc_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1917 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true); in nvme_tcp_configure_admin_queue()
1918 if (IS_ERR(ctrl->admin_tagset)) { in nvme_tcp_configure_admin_queue()
1919 error = PTR_ERR(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1923 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1924 if (IS_ERR(ctrl->fabrics_q)) { in nvme_tcp_configure_admin_queue()
1925 error = PTR_ERR(ctrl->fabrics_q); in nvme_tcp_configure_admin_queue()
1929 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1930 if (IS_ERR(ctrl->admin_q)) { in nvme_tcp_configure_admin_queue()
1931 error = PTR_ERR(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1936 error = nvme_tcp_start_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
1940 error = nvme_enable_ctrl(ctrl); in nvme_tcp_configure_admin_queue()
1944 nvme_start_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1946 error = nvme_init_identify(ctrl); in nvme_tcp_configure_admin_queue()
1953 nvme_stop_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1954 blk_sync_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1956 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
1957 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_configure_admin_queue()
1960 blk_cleanup_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1963 blk_cleanup_queue(ctrl->fabrics_q); in nvme_tcp_configure_admin_queue()
1966 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1968 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1972 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_admin_queue() argument
1975 nvme_stop_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
1976 blk_sync_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
1977 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_teardown_admin_queue()
1978 if (ctrl->admin_tagset) { in nvme_tcp_teardown_admin_queue()
1979 blk_mq_tagset_busy_iter(ctrl->admin_tagset, in nvme_tcp_teardown_admin_queue()
1980 nvme_cancel_request, ctrl); in nvme_tcp_teardown_admin_queue()
1981 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); in nvme_tcp_teardown_admin_queue()
1984 nvme_start_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
1985 nvme_tcp_destroy_admin_queue(ctrl, remove); in nvme_tcp_teardown_admin_queue()
1988 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_io_queues() argument
1991 if (ctrl->queue_count <= 1) in nvme_tcp_teardown_io_queues()
1993 nvme_stop_admin_queue(ctrl); in nvme_tcp_teardown_io_queues()
1994 nvme_stop_queues(ctrl); in nvme_tcp_teardown_io_queues()
1995 nvme_sync_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
1996 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
1997 if (ctrl->tagset) { in nvme_tcp_teardown_io_queues()
1998 blk_mq_tagset_busy_iter(ctrl->tagset, in nvme_tcp_teardown_io_queues()
1999 nvme_cancel_request, ctrl); in nvme_tcp_teardown_io_queues()
2000 blk_mq_tagset_wait_completed_request(ctrl->tagset); in nvme_tcp_teardown_io_queues()
2003 nvme_start_queues(ctrl); in nvme_tcp_teardown_io_queues()
2004 nvme_tcp_destroy_io_queues(ctrl, remove); in nvme_tcp_teardown_io_queues()
2007 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) in nvme_tcp_reconnect_or_remove() argument
2010 if (ctrl->state != NVME_CTRL_CONNECTING) { in nvme_tcp_reconnect_or_remove()
2011 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW || in nvme_tcp_reconnect_or_remove()
2012 ctrl->state == NVME_CTRL_LIVE); in nvme_tcp_reconnect_or_remove()
2016 if (nvmf_should_reconnect(ctrl)) { in nvme_tcp_reconnect_or_remove()
2017 dev_info(ctrl->device, "Reconnecting in %d seconds...\n", in nvme_tcp_reconnect_or_remove()
2018 ctrl->opts->reconnect_delay); in nvme_tcp_reconnect_or_remove()
2019 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, in nvme_tcp_reconnect_or_remove()
2020 ctrl->opts->reconnect_delay * HZ); in nvme_tcp_reconnect_or_remove()
2022 dev_info(ctrl->device, "Removing controller...\n"); in nvme_tcp_reconnect_or_remove()
2023 nvme_delete_ctrl(ctrl); in nvme_tcp_reconnect_or_remove()
2027 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_setup_ctrl() argument
2029 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_tcp_setup_ctrl()
2032 ret = nvme_tcp_configure_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2036 if (ctrl->icdoff) { in nvme_tcp_setup_ctrl()
2037 dev_err(ctrl->device, "icdoff is not supported!\n"); in nvme_tcp_setup_ctrl()
2041 if (opts->queue_size > ctrl->sqsize + 1) in nvme_tcp_setup_ctrl()
2042 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
2043 "queue_size %zu > ctrl sqsize %u, clamping down\n", in nvme_tcp_setup_ctrl()
2044 opts->queue_size, ctrl->sqsize + 1); in nvme_tcp_setup_ctrl()
2046 if (ctrl->sqsize + 1 > ctrl->maxcmd) { in nvme_tcp_setup_ctrl()
2047 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
2048 "sqsize %u > ctrl maxcmd %u, clamping down\n", in nvme_tcp_setup_ctrl()
2049 ctrl->sqsize + 1, ctrl->maxcmd); in nvme_tcp_setup_ctrl()
2050 ctrl->sqsize = ctrl->maxcmd - 1; in nvme_tcp_setup_ctrl()
2053 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2054 ret = nvme_tcp_configure_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2059 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { in nvme_tcp_setup_ctrl()
2061 * state change failure is ok if we started ctrl delete, in nvme_tcp_setup_ctrl()
2065 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_tcp_setup_ctrl()
2066 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_tcp_setup_ctrl()
2068 ret = -EINVAL; in nvme_tcp_setup_ctrl()
2072 nvme_start_ctrl(ctrl); in nvme_tcp_setup_ctrl()
2076 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2077 nvme_stop_queues(ctrl); in nvme_tcp_setup_ctrl()
2078 nvme_sync_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2079 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2080 nvme_cancel_tagset(ctrl); in nvme_tcp_setup_ctrl()
2081 nvme_tcp_destroy_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2084 nvme_stop_admin_queue(ctrl); in nvme_tcp_setup_ctrl()
2085 blk_sync_queue(ctrl->admin_q); in nvme_tcp_setup_ctrl()
2086 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_setup_ctrl()
2087 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_setup_ctrl()
2088 nvme_tcp_destroy_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2096 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_reconnect_ctrl_work() local
2098 ++ctrl->nr_reconnects; in nvme_tcp_reconnect_ctrl_work()
2100 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_tcp_reconnect_ctrl_work()
2103 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", in nvme_tcp_reconnect_ctrl_work()
2104 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2106 ctrl->nr_reconnects = 0; in nvme_tcp_reconnect_ctrl_work()
2111 dev_info(ctrl->device, "Failed reconnect attempt %d\n", in nvme_tcp_reconnect_ctrl_work()
2112 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2113 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_reconnect_ctrl_work()
2120 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_error_recovery_work() local
2122 nvme_stop_keep_alive(ctrl); in nvme_tcp_error_recovery_work()
2123 flush_work(&ctrl->async_event_work); in nvme_tcp_error_recovery_work()
2124 nvme_tcp_teardown_io_queues(ctrl, false); in nvme_tcp_error_recovery_work()
2126 nvme_start_queues(ctrl); in nvme_tcp_error_recovery_work()
2127 nvme_tcp_teardown_admin_queue(ctrl, false); in nvme_tcp_error_recovery_work()
2128 nvme_start_admin_queue(ctrl); in nvme_tcp_error_recovery_work()
2130 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_error_recovery_work()
2131 /* state change failure is ok if we started ctrl delete */ in nvme_tcp_error_recovery_work()
2132 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_tcp_error_recovery_work()
2133 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_tcp_error_recovery_work()
2137 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_error_recovery_work()
2140 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) in nvme_tcp_teardown_ctrl() argument
2142 nvme_tcp_teardown_io_queues(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2143 nvme_stop_admin_queue(ctrl); in nvme_tcp_teardown_ctrl()
2145 nvme_shutdown_ctrl(ctrl); in nvme_tcp_teardown_ctrl()
2147 nvme_disable_ctrl(ctrl); in nvme_tcp_teardown_ctrl()
2148 nvme_tcp_teardown_admin_queue(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2151 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_delete_ctrl() argument
2153 nvme_tcp_teardown_ctrl(ctrl, true); in nvme_tcp_delete_ctrl()
2158 struct nvme_ctrl *ctrl = in nvme_reset_ctrl_work() local
2161 nvme_stop_ctrl(ctrl); in nvme_reset_ctrl_work()
2162 nvme_tcp_teardown_ctrl(ctrl, false); in nvme_reset_ctrl_work()
2164 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_ctrl_work()
2165 /* state change failure is ok if we started ctrl delete */ in nvme_reset_ctrl_work()
2166 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_reset_ctrl_work()
2167 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_reset_ctrl_work()
2171 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_reset_ctrl_work()
2177 ++ctrl->nr_reconnects; in nvme_reset_ctrl_work()
2178 nvme_tcp_reconnect_or_remove(ctrl); in nvme_reset_ctrl_work()
2181 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_stop_ctrl() argument
2183 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_stop_ctrl()
2184 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); in nvme_tcp_stop_ctrl()
2189 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_ctrl() local
2191 if (list_empty(&ctrl->list)) in nvme_tcp_free_ctrl()
2195 list_del(&ctrl->list); in nvme_tcp_free_ctrl()
2198 nvmf_free_options(nctrl->opts); in nvme_tcp_free_ctrl()
2200 kfree(ctrl->queues); in nvme_tcp_free_ctrl()
2201 kfree(ctrl); in nvme_tcp_free_ctrl()
2206 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_null()
2208 sg->addr = 0; in nvme_tcp_set_sg_null()
2209 sg->length = 0; in nvme_tcp_set_sg_null()
2210 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_null()
2217 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_inline()
2219 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2220 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_inline()
2221 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_tcp_set_sg_inline()
2227 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_host_data()
2229 sg->addr = 0; in nvme_tcp_set_sg_host_data()
2230 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_host_data()
2231 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_host_data()
2237 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); in nvme_tcp_submit_async_event() local
2238 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event()
2239 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; in nvme_tcp_submit_async_event()
2240 struct nvme_command *cmd = &pdu->cmd; in nvme_tcp_submit_async_event()
2244 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_submit_async_event()
2245 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2246 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_submit_async_event()
2247 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_submit_async_event()
2248 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); in nvme_tcp_submit_async_event()
2250 cmd->common.opcode = nvme_admin_async_event; in nvme_tcp_submit_async_event()
2251 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_tcp_submit_async_event()
2252 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_submit_async_event()
2255 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_submit_async_event()
2256 ctrl->async_req.offset = 0; in nvme_tcp_submit_async_event()
2257 ctrl->async_req.curr_bio = NULL; in nvme_tcp_submit_async_event()
2258 ctrl->async_req.data_len = 0; in nvme_tcp_submit_async_event()
2260 nvme_tcp_queue_request(&ctrl->async_req, true, true); in nvme_tcp_submit_async_event()
2266 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out() local
2268 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2270 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; in nvme_tcp_complete_timed_out()
2279 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout() local
2280 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_timeout()
2282 dev_warn(ctrl->device, in nvme_tcp_timeout()
2284 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); in nvme_tcp_timeout()
2286 if (ctrl->state != NVME_CTRL_LIVE) { in nvme_tcp_timeout()
2291 * - ctrl disable/shutdown fabrics requests in nvme_tcp_timeout()
2292 * - connect requests in nvme_tcp_timeout()
2293 * - initialization admin requests in nvme_tcp_timeout()
2294 * - I/O requests that entered after unquiescing and in nvme_tcp_timeout()
2308 nvme_tcp_error_recovery(ctrl); in nvme_tcp_timeout()
static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}
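/*
 * Initialize the command PDU header and per-request send state. The on-wire
 * length fields compose as plen = hlen + hdgst + pdu_len + ddgst, with pdo
 * pointing at the first data byte for inline writes. A worked example,
 * assuming a 16-byte inline write on a queue with both digests enabled
 * (per nvme-tcp.h: 72-byte cmd PDU, 4-byte CRC32C digests):
 *   hlen = 72, hdgst = 4, pdo = 72 + 4 = 76, plen = 72 + 4 + 16 + 4 = 96
 */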
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;
	else if (req->curr_bio)
		nvme_tcp_init_iter(req, READ);

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}
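/*
 * ->commit_rqs() pairs with requests queued with bd->last == false: when a
 * dispatch batch ends, kick the per-queue io_work once rather than once per
 * request, pinned to queue->io_cpu.
 */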
static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
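/*
 * Main ->queue_rq() entry: fail or requeue early if the queue is not live,
 * build the command PDU, then hand the request to the send path. The last
 * argument propagates bd->last so io_work is only scheduled at batch
 * boundaries (see nvme_tcp_commit_rqs() above).
 */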
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}
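/*
 * Spread the tag set's hardware contexts over the allocated I/O queues.
 * Queues are laid out [default | read | poll], so each map's queue_offset is
 * the size of the preceding sections. Hypothetical example: with 4 write, 4
 * read and 2 poll queues, default maps to queues 0-3, read to 4-7 and poll
 * to 8-9; without dedicated write queues, default and read share offset 0.
 */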
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
				ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}
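/*
 * ->poll() for polled hctxs: busy-loop on the socket while the receive queue
 * is empty, then reap whatever arrived. NVME_TCP_Q_POLLING tells the
 * data_ready callback not to schedule io_work, since this context will
 * consume the data itself.
 */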
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}
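/*
 * Reject duplicate connects (unless explicitly allowed): scan the global
 * controller list under nvme_tcp_ctrl_mutex for a controller whose IP
 * options already match the new connect request.
 */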
static bool nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}
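/*
 * Fabrics "connect" entry point: allocate and initialize the controller,
 * default the port to the discovery service (NVME_TCP_DISC_PORT) if none was
 * given, resolve the target (and optional host) addresses, then run the full
 * setup sequence before publishing the controller on nvme_tcp_ctrl_list.
 */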
static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}
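/*
 * Module init/exit. The dedicated nvme_tcp_wq workqueue backs all per-queue
 * io_work; allocation failure aborts loading with -ENOMEM. On unload, every
 * controller on nvme_tcp_ctrl_list is scheduled for deletion and the
 * deletions are flushed before the workqueue is destroyed. (In the full
 * source, the nvmf_transport_ops definition that wires up
 * nvme_tcp_create_ctrl sits between these two functions.)
 */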
static int __init nvme_tcp_init_module(void)
{
	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}