Lines Matching +full:ctrl +full:len in drivers/nvme/host/tcp.c
1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
25 * A non-zero value being sufficient to indicate general consideration of any
98 struct nvme_tcp_ctrl *ctrl; member
126 struct nvme_ctrl ctrl; member
141 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) in to_tcp_ctrl() argument
143 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); in to_tcp_ctrl()
148 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
156 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
157 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
162 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
167 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
172 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
177 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
189 return rq_data_dir(rq) == WRITE && req->data_len && in nvme_tcp_has_inline_data()
190 req->data_len <= nvme_tcp_inline_data_size(req->queue); in nvme_tcp_has_inline_data()
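
nvme_tcp_has_inline_data() above gates in-capsule writes on nvme_tcp_inline_data_size(), i.e. the command capsule minus the command itself. Below is a minimal userspace model of that arithmetic; treating sizeof(struct nvme_command) as 64 bytes is an assumption taken from the NVMe spec, and the ioccsz * 16 capsule sizing mirrors the assignment at line 1404 further down.

#include <stdbool.h>
#include <stdio.h>

#define NVME_CMD_SIZE 64  /* sizeof(struct nvme_command); fixed by the NVMe spec */

/* capsule size for I/O queues: ioccsz is expressed in 16-byte units */
static int inline_data_size(int ioccsz)
{
    return ioccsz * 16 - NVME_CMD_SIZE;
}

static bool can_inline(bool is_write, int data_len, int ioccsz)
{
    return is_write && data_len && data_len <= inline_data_size(ioccsz);
}

int main(void)
{
    /* ioccsz = 4 is the spec minimum: capsule == bare command, no inline room */
    printf("ioccsz=4:   inline capacity %d bytes\n", inline_data_size(4));
    /* a controller advertising ioccsz = 260 leaves 4 KiB of inline space */
    printf("ioccsz=260: 4k write inlined? %d\n", can_inline(true, 4096, 260));
    return 0;
}
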
195 return req->iter.bvec->bv_page; in nvme_tcp_req_cur_page()
200 return req->iter.bvec->bv_offset + req->iter.iov_offset; in nvme_tcp_req_cur_offset()
205 return min_t(size_t, iov_iter_single_seg_count(&req->iter), in nvme_tcp_req_cur_length()
206 req->pdu_len - req->pdu_sent); in nvme_tcp_req_cur_length()
211 return req->iter.iov_offset; in nvme_tcp_req_offset()
217 req->pdu_len - req->pdu_sent : 0; in nvme_tcp_pdu_data_left()
221 int len) in nvme_tcp_pdu_last_send() argument
223 return nvme_tcp_pdu_data_left(req) <= len; in nvme_tcp_pdu_last_send()
235 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { in nvme_tcp_init_iter()
236 vec = &rq->special_vec; in nvme_tcp_init_iter()
241 struct bio *bio = req->curr_bio; in nvme_tcp_init_iter()
243 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in nvme_tcp_init_iter()
245 size = bio->bi_iter.bi_size; in nvme_tcp_init_iter()
246 offset = bio->bi_iter.bi_bvec_done; in nvme_tcp_init_iter()
249 iov_iter_bvec(&req->iter, dir, vec, nsegs, size); in nvme_tcp_init_iter()
250 req->iter.iov_offset = offset; in nvme_tcp_init_iter()
254 int len) in nvme_tcp_advance_req() argument
256 req->data_sent += len; in nvme_tcp_advance_req()
257 req->pdu_sent += len; in nvme_tcp_advance_req()
258 iov_iter_advance(&req->iter, len); in nvme_tcp_advance_req()
259 if (!iov_iter_count(&req->iter) && in nvme_tcp_advance_req()
260 req->data_sent < req->data_len) { in nvme_tcp_advance_req()
261 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_advance_req()
278 return !list_empty(&queue->send_list) || in nvme_tcp_queue_more()
279 !llist_empty(&queue->req_list) || queue->more_requests; in nvme_tcp_queue_more()
285 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request()
288 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
289 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
296 if (queue->io_cpu == raw_smp_processor_id() && in nvme_tcp_queue_request()
297 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
298 queue->more_requests = !last; in nvme_tcp_queue_request()
300 queue->more_requests = false; in nvme_tcp_queue_request()
301 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
305 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
313 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
315 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
324 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
328 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
334 list_del(&req->entry); in nvme_tcp_fetch_request()
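
The pairing of llist (req_list) and list (send_list) above is the lockless submission path: producers llist_add() from any context, and the consumer splices everything off with llist_del_all(), which returns entries newest-first. Re-inserting each node at the head of send_list (list_add()) reverses the chain back into submission order. A small userspace model of that reversal, with a plain singly linked list standing in for the kernel's llist:

#include <stdio.h>

struct node { int tag; struct node *next; };

int main(void)
{
    struct node c = { 1, NULL };        /* queued first (oldest) */
    struct node b = { 2, &c };
    struct node a = { 3, &b };          /* queued last: llist chain is 3->2->1 */

    struct node *lifo = &a, *fifo = NULL;
    for (struct node *n = lifo, *next; n; n = next) {
        next = n->next;
        n->next = fifo;                 /* head insertion, like list_add() */
        fifo = n;
    }
    for (struct node *n = fifo; n; n = n->next)
        printf("%d ", n->tag);          /* prints 1 2 3: submission order restored */
    printf("\n");
    return 0;
}
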
346 struct page *page, off_t off, size_t len) in nvme_tcp_ddgst_update() argument
351 sg_set_page(&sg, page, len, off); in nvme_tcp_ddgst_update()
352 ahash_request_set_crypt(hash, &sg, NULL, len); in nvme_tcp_ddgst_update()
357 void *pdu, size_t len) in nvme_tcp_hdgst() argument
361 sg_init_one(&sg, pdu, len); in nvme_tcp_hdgst()
362 ahash_request_set_crypt(hash, &sg, pdu + len, len); in nvme_tcp_hdgst()
373 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { in nvme_tcp_verify_hdgst()
374 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
377 return -EPROTO; in nvme_tcp_verify_hdgst()
380 recv_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
381 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
382 exp_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
384 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
387 return -EIO; in nvme_tcp_verify_hdgst()
397 u32 len; in nvme_tcp_check_ddgst() local
399 len = le32_to_cpu(hdr->plen) - hdr->hlen - in nvme_tcp_check_ddgst()
400 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0); in nvme_tcp_check_ddgst()
402 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { in nvme_tcp_check_ddgst()
403 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
406 return -EPROTO; in nvme_tcp_check_ddgst()
408 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
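
Both NVMe/TCP digests are CRC32C, produced here through the crypto ahash API; note in nvme_tcp_hdgst() that the digest is written directly after the header (pdu + len), the same spot nvme_tcp_verify_hdgst() reads it back from. A self-contained sketch of that layout using a bitwise CRC32C instead of the crypto API; storing the digest with a host-endian memcpy assumes a little-endian machine, where it matches the __le32 wire format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* bitwise CRC32C: reflected polynomial 0x82F63B78, init and final xor 0xFFFFFFFF */
static uint32_t crc32c(const void *buf, size_t len)
{
    const uint8_t *p = buf;
    uint32_t crc = 0xFFFFFFFF;
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
    }
    return ~crc;
}

int main(void)
{
    uint8_t pdu[24 + 4] = { 0x04, 0x01, 24, 28 };  /* toy header, hlen = 24 */
    uint32_t dg = crc32c(pdu, 24);
    memcpy(pdu + 24, &dg, 4);        /* digest lives right after the header */

    /* receive side: recompute over hlen bytes and compare in place */
    uint32_t exp = crc32c(pdu, 24);
    printf("hdgst %08x %s\n", dg, memcmp(pdu + 24, &exp, 4) ? "BAD" : "ok");
    return 0;
}
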
418 page_frag_free(req->pdu); in nvme_tcp_exit_request()
425 struct nvme_tcp_ctrl *ctrl = set->driver_data; in nvme_tcp_init_request() local
427 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_tcp_init_request()
428 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request()
431 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
434 if (!req->pdu) in nvme_tcp_init_request()
435 return -ENOMEM; in nvme_tcp_init_request()
437 req->queue = queue; in nvme_tcp_init_request()
438 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_tcp_init_request()
446 struct nvme_tcp_ctrl *ctrl = data; in nvme_tcp_init_hctx() local
447 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx()
449 hctx->driver_data = queue; in nvme_tcp_init_hctx()
456 struct nvme_tcp_ctrl *ctrl = data; in nvme_tcp_init_admin_hctx() local
457 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx()
459 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
466 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
467 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
473 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
475 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
476 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
477 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
480 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) in nvme_tcp_error_recovery() argument
482 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_tcp_error_recovery()
485 dev_warn(ctrl->device, "starting error recovery\n"); in nvme_tcp_error_recovery()
486 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_error_recovery()
494 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
496 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
498 cqe->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_process_nvme_cqe()
499 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
500 return -EINVAL; in nvme_tcp_process_nvme_cqe()
503 if (!nvme_try_complete_req(rq, cqe->status, cqe->result)) in nvme_tcp_process_nvme_cqe()
505 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
515 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
517 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
519 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_c2h_data()
520 return -ENOENT; in nvme_tcp_handle_c2h_data()
524 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
526 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
527 return -EIO; in nvme_tcp_handle_c2h_data()
530 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
532 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && in nvme_tcp_handle_c2h_data()
533 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { in nvme_tcp_handle_c2h_data()
534 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
536 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
537 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
538 return -EPROTO; in nvme_tcp_handle_c2h_data()
547 struct nvme_completion *cqe = &pdu->cqe; in nvme_tcp_handle_comp()
557 cqe->command_id))) in nvme_tcp_handle_comp()
558 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
559 &cqe->result); in nvme_tcp_handle_comp()
569 struct nvme_tcp_data_pdu *data = req->pdu; in nvme_tcp_setup_h2c_data_pdu()
570 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu()
575 req->pdu_len = le32_to_cpu(pdu->r2t_length); in nvme_tcp_setup_h2c_data_pdu()
576 req->pdu_sent = 0; in nvme_tcp_setup_h2c_data_pdu()
578 if (unlikely(!req->pdu_len)) { in nvme_tcp_setup_h2c_data_pdu()
579 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
580 "req %d r2t len is %u, probably a bug...\n", in nvme_tcp_setup_h2c_data_pdu()
581 rq->tag, req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
582 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
585 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { in nvme_tcp_setup_h2c_data_pdu()
586 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
587 "req %d r2t len %u exceeded data len %u (%zu sent)\n", in nvme_tcp_setup_h2c_data_pdu()
588 rq->tag, req->pdu_len, req->data_len, in nvme_tcp_setup_h2c_data_pdu()
589 req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
590 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
593 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { in nvme_tcp_setup_h2c_data_pdu()
594 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
596 rq->tag, le32_to_cpu(pdu->r2t_offset), in nvme_tcp_setup_h2c_data_pdu()
597 req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
598 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
602 data->hdr.type = nvme_tcp_h2c_data; in nvme_tcp_setup_h2c_data_pdu()
603 data->hdr.flags = NVME_TCP_F_DATA_LAST; in nvme_tcp_setup_h2c_data_pdu()
604 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
605 data->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_h2c_data_pdu()
606 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
607 data->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_h2c_data_pdu()
608 data->hdr.hlen = sizeof(*data); in nvme_tcp_setup_h2c_data_pdu()
609 data->hdr.pdo = data->hdr.hlen + hdgst; in nvme_tcp_setup_h2c_data_pdu()
610 data->hdr.plen = in nvme_tcp_setup_h2c_data_pdu()
611 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_h2c_data_pdu()
612 data->ttag = pdu->ttag; in nvme_tcp_setup_h2c_data_pdu()
613 data->command_id = nvme_cid(rq); in nvme_tcp_setup_h2c_data_pdu()
614 data->data_offset = pdu->r2t_offset; in nvme_tcp_setup_h2c_data_pdu()
615 data->data_length = cpu_to_le32(req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
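
The header fields filled in above encode the H2CData length rules: pdo points past the header plus its optional digest, and plen covers header, header digest, payload, and data digest. A quick model of that arithmetic; the 24-byte header size is an assumption taken from sizeof(struct nvme_tcp_data_pdu) in include/linux/nvme-tcp.h:

#include <stdio.h>

#define DIGEST_LEN 4        /* NVME_TCP_DIGEST_LENGTH (CRC32C) */
#define DATA_PDU_HLEN 24    /* sizeof(struct nvme_tcp_data_pdu), assumed */

int main(void)
{
    int hdr_digest = 1, data_digest = 1;
    unsigned int r2t_len = 8192;                 /* payload granted by the R2T */

    unsigned int hdgst = hdr_digest ? DIGEST_LEN : 0;
    unsigned int ddgst = data_digest ? DIGEST_LEN : 0;

    unsigned int pdo  = DATA_PDU_HLEN + hdgst;           /* where payload starts */
    unsigned int plen = DATA_PDU_HLEN + hdgst + r2t_len + ddgst;

    printf("pdo=%u plen=%u\n", pdo, plen);       /* pdo=28 plen=8224 */
    return 0;
}
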
626 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
628 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
630 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_r2t()
631 return -ENOENT; in nvme_tcp_handle_r2t()
639 req->state = NVME_TCP_SEND_H2C_PDU; in nvme_tcp_handle_r2t()
640 req->offset = 0; in nvme_tcp_handle_r2t()
648 unsigned int *offset, size_t *len) in nvme_tcp_recv_pdu() argument
651 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
652 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
656 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
660 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
661 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
663 *len -= rcv_len; in nvme_tcp_recv_pdu()
664 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
667 hdr = queue->pdu; in nvme_tcp_recv_pdu()
668 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
669 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
675 if (queue->data_digest) { in nvme_tcp_recv_pdu()
676 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
681 switch (hdr->type) { in nvme_tcp_recv_pdu()
683 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
686 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
689 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
691 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
692 "unsupported pdu type (%d)\n", hdr->type); in nvme_tcp_recv_pdu()
693 return -EINVAL; in nvme_tcp_recv_pdu()
706 unsigned int *offset, size_t *len) in nvme_tcp_recv_data() argument
708 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
710 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
716 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
720 if (!iov_iter_count(&req->iter)) { in nvme_tcp_recv_data()
721 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_recv_data()
727 if (!req->curr_bio) { in nvme_tcp_recv_data()
728 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
730 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
732 return -EIO; in nvme_tcp_recv_data()
739 iov_iter_count(&req->iter)); in nvme_tcp_recv_data()
741 if (queue->data_digest) in nvme_tcp_recv_data()
743 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
746 &req->iter, recv_len); in nvme_tcp_recv_data()
748 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
750 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
754 *len -= recv_len; in nvme_tcp_recv_data()
756 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
759 if (!queue->data_remaining) { in nvme_tcp_recv_data()
760 if (queue->data_digest) { in nvme_tcp_recv_data()
761 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
762 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
764 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_data()
766 queue->nr_cqe++; in nvme_tcp_recv_data()
776 struct sk_buff *skb, unsigned int *offset, size_t *len) in nvme_tcp_recv_ddgst() argument
778 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
779 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
780 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
781 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
788 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
790 *len -= recv_len; in nvme_tcp_recv_ddgst()
791 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
794 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
795 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
797 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
798 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
799 return -EIO; in nvme_tcp_recv_ddgst()
802 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_ddgst()
804 pdu->command_id); in nvme_tcp_recv_ddgst()
807 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
815 unsigned int offset, size_t len) in nvme_tcp_recv_skb() argument
817 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb()
818 size_t consumed = len; in nvme_tcp_recv_skb()
821 while (len) { in nvme_tcp_recv_skb()
824 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
827 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
830 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
833 result = -EFAULT; in nvme_tcp_recv_skb()
836 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
838 queue->rd_enabled = false; in nvme_tcp_recv_skb()
839 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
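
nvme_tcp_recv_skb() is the read_sock() callback: it loops over one skb, letting whichever phase nvme_tcp_recv_state() reports (PDU header, data, or data digest) consume as much as it can, with *offset and *len threaded through so a PDU may span several skbs or one skb may hold several PDUs. The model below replays that structure over three uneven chunks; the 24/8/4 byte phase sizes are arbitrary assumptions, and checking data before digest is a simplification (the driver keys off ddgst_remaining, which only becomes nonzero once the data phase ends):

#include <stdio.h>

static unsigned int pdu_rem = 24, data_rem = 8, ddgst_rem = 4;

static unsigned int consume(unsigned int *rem, unsigned int *off, unsigned int *len)
{
    unsigned int n = *len < *rem ? *len : *rem;   /* min(), as in the driver */
    *rem -= n; *off += n; *len -= n;
    return n;
}

int main(void)
{
    /* the 36 wire bytes arrive as three "skbs" of uneven size */
    unsigned int chunks[] = { 10, 20, 6 };
    for (int i = 0; i < 3; i++) {
        unsigned int off = 0, len = chunks[i];
        while (len) {
            if (pdu_rem)
                printf("skb%d: %u hdr bytes\n", i, consume(&pdu_rem, &off, &len));
            else if (data_rem)
                printf("skb%d: %u data bytes\n", i, consume(&data_rem, &off, &len));
            else
                printf("skb%d: %u ddgst bytes\n", i, consume(&ddgst_rem, &off, &len));
        }
    }
    return 0;
}
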
851 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
852 queue = sk->sk_user_data; in nvme_tcp_data_ready()
853 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
854 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
855 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
856 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
863 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
864 queue = sk->sk_user_data; in nvme_tcp_write_space()
866 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in nvme_tcp_write_space()
867 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
869 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
876 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
877 queue = sk->sk_user_data; in nvme_tcp_state_change()
881 switch (sk->sk_state) { in nvme_tcp_state_change()
887 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
890 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
892 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
895 queue->state_change(sk); in nvme_tcp_state_change()
897 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
902 queue->request = NULL; in nvme_tcp_done_send_req()
912 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data()
913 int req_data_len = req->data_len; in nvme_tcp_try_send_data()
918 size_t len = nvme_tcp_req_cur_length(req); in nvme_tcp_try_send_data() local
919 bool last = nvme_tcp_pdu_last_send(req, len); in nvme_tcp_try_send_data()
920 int req_data_sent = req->data_sent; in nvme_tcp_try_send_data()
923 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
929 ret = kernel_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
932 ret = sock_no_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
938 if (queue->data_digest) in nvme_tcp_try_send_data()
939 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
951 if (last && ret == len) { in nvme_tcp_try_send_data()
952 if (queue->data_digest) { in nvme_tcp_try_send_data()
953 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
954 &req->ddgst); in nvme_tcp_try_send_data()
955 req->state = NVME_TCP_SEND_DDGST; in nvme_tcp_try_send_data()
956 req->offset = 0; in nvme_tcp_try_send_data()
963 return -EAGAIN; in nvme_tcp_try_send_data()
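
The send loop above sizes each kernel_sendpage() call to min(current segment, PDU bytes left) and must tolerate the socket accepting fewer bytes than offered, advancing the iterator by whatever was actually taken. A userspace model of that bookkeeping; the fake socket that takes at most 3000 bytes per call is an arbitrary assumption used to exercise the partial-write path:

#include <stdio.h>

/* stand-in for kernel_sendpage(): the socket may accept fewer bytes than asked */
static int fake_send(unsigned int len)
{
    return len > 3000 ? 3000 : (int)len;
}

int main(void)
{
    unsigned int seg[] = { 4096, 4096 };  /* two bvec segments of one 8 KiB PDU */
    unsigned int cur = 0, seg_off = 0;
    unsigned int pdu_len = 8192, pdu_sent = 0;

    while (pdu_sent < pdu_len) {
        unsigned int avail = seg[cur] - seg_off;          /* current segment   */
        unsigned int left  = pdu_len - pdu_sent;          /* PDU bytes to go   */
        unsigned int len   = avail < left ? avail : left; /* min(), as above   */
        int ret = fake_send(len);

        printf("asked %u, socket took %d\n", len, ret);
        pdu_sent += ret;                /* nvme_tcp_advance_req() bookkeeping  */
        seg_off += ret;
        if (seg_off == seg[cur]) {      /* segment drained: step to next bvec  */
            cur++;
            seg_off = 0;
        }
    }
    printf("PDU complete: %u/%u bytes\n", pdu_sent, pdu_len);
    return 0;
}
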
968 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu()
969 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_try_send_cmd_pdu()
972 int len = sizeof(*pdu) + hdgst - req->offset; in nvme_tcp_try_send_cmd_pdu() local
981 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
982 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
984 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_cmd_pdu()
985 offset_in_page(pdu) + req->offset, len, flags); in nvme_tcp_try_send_cmd_pdu()
989 len -= ret; in nvme_tcp_try_send_cmd_pdu()
990 if (!len) { in nvme_tcp_try_send_cmd_pdu()
992 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_cmd_pdu()
993 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
994 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
1001 req->offset += ret; in nvme_tcp_try_send_cmd_pdu()
1003 return -EAGAIN; in nvme_tcp_try_send_cmd_pdu()
1008 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu()
1009 struct nvme_tcp_data_pdu *pdu = req->pdu; in nvme_tcp_try_send_data_pdu()
1011 int len = sizeof(*pdu) - req->offset + hdgst; in nvme_tcp_try_send_data_pdu() local
1014 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
1015 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
1017 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_data_pdu()
1018 offset_in_page(pdu) + req->offset, len, in nvme_tcp_try_send_data_pdu()
1023 len -= ret; in nvme_tcp_try_send_data_pdu()
1024 if (!len) { in nvme_tcp_try_send_data_pdu()
1025 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_data_pdu()
1026 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1027 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1028 if (!req->data_sent) in nvme_tcp_try_send_data_pdu()
1032 req->offset += ret; in nvme_tcp_try_send_data_pdu()
1034 return -EAGAIN; in nvme_tcp_try_send_data_pdu()
1039 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst()
1040 size_t offset = req->offset; in nvme_tcp_try_send_ddgst()
1044 .iov_base = (u8 *)&req->ddgst + req->offset, in nvme_tcp_try_send_ddgst()
1045 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset in nvme_tcp_try_send_ddgst()
1053 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1062 req->offset += ret; in nvme_tcp_try_send_ddgst()
1063 return -EAGAIN; in nvme_tcp_try_send_ddgst()
1071 if (!queue->request) { in nvme_tcp_try_send()
1072 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1073 if (!queue->request) in nvme_tcp_try_send()
1076 req = queue->request; in nvme_tcp_try_send()
1078 if (req->state == NVME_TCP_SEND_CMD_PDU) { in nvme_tcp_try_send()
1086 if (req->state == NVME_TCP_SEND_H2C_PDU) { in nvme_tcp_try_send()
1092 if (req->state == NVME_TCP_SEND_DATA) { in nvme_tcp_try_send()
1098 if (req->state == NVME_TCP_SEND_DDGST) in nvme_tcp_try_send()
1101 if (ret == -EAGAIN) { in nvme_tcp_try_send()
1104 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1106 if (ret != -EPIPE && ret != -ECONNRESET) in nvme_tcp_try_send()
1107 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1115 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1116 struct sock *sk = sock->sk; in nvme_tcp_try_recv()
1123 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1124 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); in nvme_tcp_try_recv()
1139 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1141 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1159 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1164 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1166 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1167 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1179 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1180 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1182 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1184 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1185 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1187 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1191 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1194 return -ENOMEM; in nvme_tcp_alloc_crypto()
1197 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_free_async_req() argument
1199 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_free_async_req()
1201 page_frag_free(async->pdu); in nvme_tcp_free_async_req()
1204 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_alloc_async_req() argument
1206 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1207 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_alloc_async_req()
1210 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1213 if (!async->pdu) in nvme_tcp_alloc_async_req()
1214 return -ENOMEM; in nvme_tcp_alloc_async_req()
1216 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1222 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_queue() local
1223 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue()
1225 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1228 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1231 sock_release(queue->sock); in nvme_tcp_free_queue()
1232 kfree(queue->pdu); in nvme_tcp_free_queue()
1233 mutex_destroy(&queue->queue_lock); in nvme_tcp_free_queue()
1247 return -ENOMEM; in nvme_tcp_init_connection()
1251 ret = -ENOMEM; in nvme_tcp_init_connection()
1255 icreq->hdr.type = nvme_tcp_icreq; in nvme_tcp_init_connection()
1256 icreq->hdr.hlen = sizeof(*icreq); in nvme_tcp_init_connection()
1257 icreq->hdr.pdo = 0; in nvme_tcp_init_connection()
1258 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); in nvme_tcp_init_connection()
1259 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); in nvme_tcp_init_connection()
1260 icreq->maxr2t = 0; /* single inflight r2t supported */ in nvme_tcp_init_connection()
1261 icreq->hpda = 0; /* no alignment constraint */ in nvme_tcp_init_connection()
1262 if (queue->hdr_digest) in nvme_tcp_init_connection()
1263 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; in nvme_tcp_init_connection()
1264 if (queue->data_digest) in nvme_tcp_init_connection()
1265 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; in nvme_tcp_init_connection()
1269 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1276 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1281 ret = -EINVAL; in nvme_tcp_init_connection()
1282 if (icresp->hdr.type != nvme_tcp_icresp) { in nvme_tcp_init_connection()
1284 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1288 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { in nvme_tcp_init_connection()
1290 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1294 if (icresp->pfv != NVME_TCP_PFV_1_0) { in nvme_tcp_init_connection()
1296 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1300 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvme_tcp_init_connection()
1301 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1302 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1303 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1305 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1310 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvme_tcp_init_connection()
1311 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1312 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1313 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1315 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1320 if (icresp->cpda != 0) { in nvme_tcp_init_connection()
1322 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
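
Connection setup above is a fixed two-PDU exchange: the host sends an ICReq advertising PFV 1.0, maxr2t, hpda and its digest choices, then insists the ICResp echo a compatible configuration, rejecting any nonzero cpda since the driver never pads received data PDUs. A compact sketch of that validation; the field layout and the constant values (icresp type 0x01, 128-byte PDU, digest enable bits) are assumptions taken from include/linux/nvme-tcp.h and the NVMe/TCP spec:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* fields we validate from the ICResp; layout assumed per include/linux/nvme-tcp.h */
struct icresp {
    uint8_t  type;      /* must be nvme_tcp_icresp (0x01, assumed) */
    uint32_t plen;      /* must equal the 128-byte ICResp PDU size */
    uint16_t pfv;       /* must be NVME_TCP_PFV_1_0 (0) */
    uint8_t  digest;    /* HDGST/DDGST enable bits, must match what we asked */
    uint8_t  cpda;      /* controller PDU alignment: this host accepts only 0 */
};

static bool icresp_ok(const struct icresp *r, bool hdgst, bool ddgst)
{
    if (r->type != 0x01 || r->plen != 128 || r->pfv != 0)
        return false;
    if (!!(r->digest & 0x2) != ddgst)   /* NVME_TCP_DATA_DIGEST_ENABLE, assumed */
        return false;
    if (!!(r->digest & 0x1) != hdgst)   /* NVME_TCP_HDR_DIGEST_ENABLE, assumed */
        return false;
    return r->cpda == 0;
}

int main(void)
{
    struct icresp r = { 0x01, 128, 0, 0x3, 0 };
    printf("icresp accepted: %d\n", icresp_ok(&r, true, true));
    return 0;
}
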
1341 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue() local
1345 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_default_queue()
1350 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue() local
1355 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_read_queue()
1356 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_read_queue()
1361 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue() local
1367 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_poll_queue()
1368 ctrl->io_queues[HCTX_TYPE_READ] + in nvme_tcp_poll_queue()
1369 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_tcp_poll_queue()
1374 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu() local
1379 n = qid - 1; in nvme_tcp_set_queue_io_cpu()
1381 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; in nvme_tcp_set_queue_io_cpu()
1383 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - in nvme_tcp_set_queue_io_cpu()
1384 ctrl->io_queues[HCTX_TYPE_READ] - 1; in nvme_tcp_set_queue_io_cpu()
1385 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_set_queue_io_cpu()
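
nvme_tcp_set_queue_io_cpu() first converts the queue id into an index n that is zero-based within its class (default, read, or poll) by subtracting the sizes of the preceding classes, then picks the n-th online CPU with wraparound. A userspace rendition; modelling cpumask_next_wrap() as a modulo assumes a dense mask of online CPUs:

#include <stdio.h>

static int io_queues[3] = { 4, 2, 2 };   /* DEFAULT, READ, POLL class sizes */

static int queue_io_cpu(int qid, int ncpus)
{
    int n;
    if (qid <= io_queues[0])                        /* default class */
        n = qid - 1;
    else if (qid <= io_queues[0] + io_queues[1])    /* read class */
        n = qid - io_queues[0] - 1;
    else                                            /* poll class */
        n = qid - io_queues[0] - io_queues[1] - 1;
    return n % ncpus;   /* cpumask_next_wrap() over a dense online mask */
}

int main(void)
{
    for (int qid = 1; qid <= 8; qid++)
        printf("queue %d -> cpu %d\n", qid, queue_io_cpu(qid, 4));
    return 0;
}
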
1391 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_queue() local
1392 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue()
1395 mutex_init(&queue->queue_lock); in nvme_tcp_alloc_queue()
1396 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1397 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1398 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1399 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1400 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1401 queue->queue_size = queue_size; in nvme_tcp_alloc_queue()
1404 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1406 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1409 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, in nvme_tcp_alloc_queue()
1410 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1412 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1418 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1421 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1428 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1431 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1434 if (nctrl->opts->tos >= 0) in nvme_tcp_alloc_queue()
1435 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1438 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1440 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1442 queue->request = NULL; in nvme_tcp_alloc_queue()
1443 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1444 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1445 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1446 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1447 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1449 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_alloc_queue()
1450 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1451 sizeof(ctrl->src_addr)); in nvme_tcp_alloc_queue()
1453 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1460 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1461 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1462 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1465 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1473 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1474 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1475 ret = -ENOMEM; in nvme_tcp_alloc_queue()
1479 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1482 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1483 sizeof(ctrl->addr), 0); in nvme_tcp_alloc_queue()
1485 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1494 queue->rd_enabled = true; in nvme_tcp_alloc_queue()
1495 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1498 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1499 queue->sock->sk->sk_user_data = queue; in nvme_tcp_alloc_queue()
1500 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_alloc_queue()
1501 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_alloc_queue()
1502 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_alloc_queue()
1503 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_alloc_queue()
1504 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_alloc_queue()
1505 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_alloc_queue()
1507 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_alloc_queue()
1509 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1514 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1516 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1518 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1521 sock_release(queue->sock); in nvme_tcp_alloc_queue()
1522 queue->sock = NULL; in nvme_tcp_alloc_queue()
1524 mutex_destroy(&queue->queue_lock); in nvme_tcp_alloc_queue()
1530 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_calls()
1532 write_lock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_calls()
1533 sock->sk->sk_user_data = NULL; in nvme_tcp_restore_sock_calls()
1534 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_calls()
1535 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_calls()
1536 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_calls()
1537 write_unlock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_calls()
1542 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1544 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1549 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_stop_queue() local
1550 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue()
1552 mutex_lock(&queue->queue_lock); in nvme_tcp_stop_queue()
1553 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue()
1555 mutex_unlock(&queue->queue_lock); in nvme_tcp_stop_queue()
1560 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_start_queue() local
1569 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags); in nvme_tcp_start_queue()
1571 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags)) in nvme_tcp_start_queue()
1572 __nvme_tcp_stop_queue(&ctrl->queues[idx]); in nvme_tcp_start_queue()
1573 dev_err(nctrl->device, in nvme_tcp_start_queue()
1582 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_tagset() local
1587 set = &ctrl->admin_tag_set; in nvme_tcp_alloc_tagset()
1589 set->ops = &nvme_tcp_admin_mq_ops; in nvme_tcp_alloc_tagset()
1590 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_tcp_alloc_tagset()
1591 set->reserved_tags = 2; /* connect + keep-alive */ in nvme_tcp_alloc_tagset()
1592 set->numa_node = nctrl->numa_node; in nvme_tcp_alloc_tagset()
1593 set->flags = BLK_MQ_F_BLOCKING; in nvme_tcp_alloc_tagset()
1594 set->cmd_size = sizeof(struct nvme_tcp_request); in nvme_tcp_alloc_tagset()
1595 set->driver_data = ctrl; in nvme_tcp_alloc_tagset()
1596 set->nr_hw_queues = 1; in nvme_tcp_alloc_tagset()
1597 set->timeout = ADMIN_TIMEOUT; in nvme_tcp_alloc_tagset()
1599 set = &ctrl->tag_set; in nvme_tcp_alloc_tagset()
1601 set->ops = &nvme_tcp_mq_ops; in nvme_tcp_alloc_tagset()
1602 set->queue_depth = nctrl->sqsize + 1; in nvme_tcp_alloc_tagset()
1603 set->reserved_tags = 1; /* fabric connect */ in nvme_tcp_alloc_tagset()
1604 set->numa_node = nctrl->numa_node; in nvme_tcp_alloc_tagset()
1605 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; in nvme_tcp_alloc_tagset()
1606 set->cmd_size = sizeof(struct nvme_tcp_request); in nvme_tcp_alloc_tagset()
1607 set->driver_data = ctrl; in nvme_tcp_alloc_tagset()
1608 set->nr_hw_queues = nctrl->queue_count - 1; in nvme_tcp_alloc_tagset()
1609 set->timeout = NVME_IO_TIMEOUT; in nvme_tcp_alloc_tagset()
1610 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; in nvme_tcp_alloc_tagset()
1620 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_free_admin_queue() argument
1622 if (to_tcp_ctrl(ctrl)->async_req.pdu) { in nvme_tcp_free_admin_queue()
1623 cancel_work_sync(&ctrl->async_event_work); in nvme_tcp_free_admin_queue()
1624 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_free_admin_queue()
1625 to_tcp_ctrl(ctrl)->async_req.pdu = NULL; in nvme_tcp_free_admin_queue()
1628 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_free_admin_queue()
1631 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_free_io_queues() argument
1635 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_free_io_queues()
1636 nvme_tcp_free_queue(ctrl, i); in nvme_tcp_free_io_queues()
1639 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_stop_io_queues() argument
1643 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_stop_io_queues()
1644 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_stop_io_queues()
1647 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_start_io_queues() argument
1651 for (i = 1; i < ctrl->queue_count; i++) { in nvme_tcp_start_io_queues()
1652 ret = nvme_tcp_start_queue(ctrl, i); in nvme_tcp_start_io_queues()
1660 for (i--; i >= 1; i--) in nvme_tcp_start_io_queues()
1661 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_start_io_queues()
1665 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_admin_queue() argument
1669 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); in nvme_tcp_alloc_admin_queue()
1673 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_alloc_admin_queue()
1680 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_alloc_admin_queue()
1684 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in __nvme_tcp_alloc_io_queues() argument
1688 for (i = 1; i < ctrl->queue_count; i++) { in __nvme_tcp_alloc_io_queues()
1689 ret = nvme_tcp_alloc_queue(ctrl, i, in __nvme_tcp_alloc_io_queues()
1690 ctrl->sqsize + 1); in __nvme_tcp_alloc_io_queues()
1698 for (i--; i >= 1; i--) in __nvme_tcp_alloc_io_queues()
1699 nvme_tcp_free_queue(ctrl, i); in __nvme_tcp_alloc_io_queues()
1704 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_nr_io_queues() argument
1708 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1709 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1710 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1718 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_set_io_queues() local
1719 struct nvmf_ctrl_options *opts = nctrl->opts; in nvme_tcp_set_io_queues()
1721 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { in nvme_tcp_set_io_queues()
1727 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; in nvme_tcp_set_io_queues()
1728 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_set_io_queues()
1729 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_tcp_set_io_queues()
1730 min(opts->nr_write_queues, nr_io_queues); in nvme_tcp_set_io_queues()
1731 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_set_io_queues()
1738 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_tcp_set_io_queues()
1739 min(opts->nr_io_queues, nr_io_queues); in nvme_tcp_set_io_queues()
1740 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_set_io_queues()
1743 if (opts->nr_poll_queues && nr_io_queues) { in nvme_tcp_set_io_queues()
1745 ctrl->io_queues[HCTX_TYPE_POLL] = in nvme_tcp_set_io_queues()
1746 min(opts->nr_poll_queues, nr_io_queues); in nvme_tcp_set_io_queues()
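
nvme_tcp_set_io_queues() carves the granted queue count into the three classes: with dedicated write queues requested, reads take opts->nr_io_queues and the default (write) class takes the remainder; otherwise reads and writes share the default class; poll queues get whatever is left. A standalone version of that carving, mirroring the branches above:

#include <stdio.h>

enum { DEF, READ, POLL };

static void set_io_queues(unsigned int nr, unsigned int nr_io,
                          unsigned int nr_write, unsigned int nr_poll,
                          unsigned int io_queues[3])
{
    if (nr_write && nr_io < nr) {
        /* dedicated write queues: READ gets nr_io, DEFAULT the rest */
        io_queues[READ] = nr_io;
        nr -= io_queues[READ];
        io_queues[DEF] = nr_write < nr ? nr_write : nr;
        nr -= io_queues[DEF];
    } else {
        /* shared: reads and writes both land on the DEFAULT class */
        io_queues[DEF] = nr_io < nr ? nr_io : nr;
        nr -= io_queues[DEF];
    }
    if (nr_poll && nr)
        io_queues[POLL] = nr_poll < nr ? nr_poll : nr;
}

int main(void)
{
    unsigned int q[3] = { 0, 0, 0 };
    /* granted 8 queues; asked for 4 io, 2 write, 2 poll */
    set_io_queues(8, 4, 2, 2, q);
    printf("default=%u read=%u poll=%u\n", q[DEF], q[READ], q[POLL]);
    return 0;
}
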
1750 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_io_queues() argument
1755 nr_io_queues = nvme_tcp_nr_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1756 ret = nvme_set_queue_count(ctrl, &nr_io_queues); in nvme_tcp_alloc_io_queues()
1761 dev_err(ctrl->device, in nvme_tcp_alloc_io_queues()
1763 return -ENOMEM; in nvme_tcp_alloc_io_queues()
1766 ctrl->queue_count = nr_io_queues + 1; in nvme_tcp_alloc_io_queues()
1767 dev_info(ctrl->device, in nvme_tcp_alloc_io_queues()
1770 nvme_tcp_set_io_queues(ctrl, nr_io_queues); in nvme_tcp_alloc_io_queues()
1772 return __nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1775 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_io_queues() argument
1777 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1779 blk_cleanup_queue(ctrl->connect_q); in nvme_tcp_destroy_io_queues()
1780 blk_mq_free_tag_set(ctrl->tagset); in nvme_tcp_destroy_io_queues()
1782 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1785 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_io_queues() argument
1789 ret = nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1794 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false); in nvme_tcp_configure_io_queues()
1795 if (IS_ERR(ctrl->tagset)) { in nvme_tcp_configure_io_queues()
1796 ret = PTR_ERR(ctrl->tagset); in nvme_tcp_configure_io_queues()
1800 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); in nvme_tcp_configure_io_queues()
1801 if (IS_ERR(ctrl->connect_q)) { in nvme_tcp_configure_io_queues()
1802 ret = PTR_ERR(ctrl->connect_q); in nvme_tcp_configure_io_queues()
1807 ret = nvme_tcp_start_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1812 nvme_start_queues(ctrl); in nvme_tcp_configure_io_queues()
1813 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { in nvme_tcp_configure_io_queues()
1819 ret = -ENODEV; in nvme_tcp_configure_io_queues()
1822 blk_mq_update_nr_hw_queues(ctrl->tagset, in nvme_tcp_configure_io_queues()
1823 ctrl->queue_count - 1); in nvme_tcp_configure_io_queues()
1824 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
1830 nvme_stop_queues(ctrl); in nvme_tcp_configure_io_queues()
1831 nvme_sync_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1832 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1834 nvme_cancel_tagset(ctrl); in nvme_tcp_configure_io_queues()
1836 blk_cleanup_queue(ctrl->connect_q); in nvme_tcp_configure_io_queues()
1839 blk_mq_free_tag_set(ctrl->tagset); in nvme_tcp_configure_io_queues()
1841 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1845 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_admin_queue() argument
1847 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_destroy_admin_queue()
1849 blk_cleanup_queue(ctrl->admin_q); in nvme_tcp_destroy_admin_queue()
1850 blk_cleanup_queue(ctrl->fabrics_q); in nvme_tcp_destroy_admin_queue()
1851 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_tcp_destroy_admin_queue()
1853 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_destroy_admin_queue()
1856 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_admin_queue() argument
1860 error = nvme_tcp_alloc_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1865 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true); in nvme_tcp_configure_admin_queue()
1866 if (IS_ERR(ctrl->admin_tagset)) { in nvme_tcp_configure_admin_queue()
1867 error = PTR_ERR(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1871 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1872 if (IS_ERR(ctrl->fabrics_q)) { in nvme_tcp_configure_admin_queue()
1873 error = PTR_ERR(ctrl->fabrics_q); in nvme_tcp_configure_admin_queue()
1877 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1878 if (IS_ERR(ctrl->admin_q)) { in nvme_tcp_configure_admin_queue()
1879 error = PTR_ERR(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1884 error = nvme_tcp_start_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
1888 error = nvme_enable_ctrl(ctrl); in nvme_tcp_configure_admin_queue()
1892 nvme_start_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1894 error = nvme_init_identify(ctrl); in nvme_tcp_configure_admin_queue()
1901 nvme_stop_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1902 blk_sync_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1904 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
1905 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_configure_admin_queue()
1908 blk_cleanup_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1911 blk_cleanup_queue(ctrl->fabrics_q); in nvme_tcp_configure_admin_queue()
1914 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1916 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1920 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_admin_queue() argument
1923 nvme_stop_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
1924 blk_sync_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
1925 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_teardown_admin_queue()
1926 if (ctrl->admin_tagset) { in nvme_tcp_teardown_admin_queue()
1927 blk_mq_tagset_busy_iter(ctrl->admin_tagset, in nvme_tcp_teardown_admin_queue()
1928 nvme_cancel_request, ctrl); in nvme_tcp_teardown_admin_queue()
1929 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); in nvme_tcp_teardown_admin_queue()
1932 nvme_start_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
1933 nvme_tcp_destroy_admin_queue(ctrl, remove); in nvme_tcp_teardown_admin_queue()
1936 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_io_queues() argument
1939 if (ctrl->queue_count <= 1) in nvme_tcp_teardown_io_queues()
1941 nvme_stop_admin_queue(ctrl); in nvme_tcp_teardown_io_queues()
1942 nvme_start_freeze(ctrl); in nvme_tcp_teardown_io_queues()
1943 nvme_stop_queues(ctrl); in nvme_tcp_teardown_io_queues()
1944 nvme_sync_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
1945 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
1946 if (ctrl->tagset) { in nvme_tcp_teardown_io_queues()
1947 blk_mq_tagset_busy_iter(ctrl->tagset, in nvme_tcp_teardown_io_queues()
1948 nvme_cancel_request, ctrl); in nvme_tcp_teardown_io_queues()
1949 blk_mq_tagset_wait_completed_request(ctrl->tagset); in nvme_tcp_teardown_io_queues()
1952 nvme_start_queues(ctrl); in nvme_tcp_teardown_io_queues()
1953 nvme_tcp_destroy_io_queues(ctrl, remove); in nvme_tcp_teardown_io_queues()
1956 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) in nvme_tcp_reconnect_or_remove() argument
1959 if (ctrl->state != NVME_CTRL_CONNECTING) { in nvme_tcp_reconnect_or_remove()
1960 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW || in nvme_tcp_reconnect_or_remove()
1961 ctrl->state == NVME_CTRL_LIVE); in nvme_tcp_reconnect_or_remove()
1965 if (nvmf_should_reconnect(ctrl)) { in nvme_tcp_reconnect_or_remove()
1966 dev_info(ctrl->device, "Reconnecting in %d seconds...\n", in nvme_tcp_reconnect_or_remove()
1967 ctrl->opts->reconnect_delay); in nvme_tcp_reconnect_or_remove()
1968 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, in nvme_tcp_reconnect_or_remove()
1969 ctrl->opts->reconnect_delay * HZ); in nvme_tcp_reconnect_or_remove()
1971 dev_info(ctrl->device, "Removing controller...\n"); in nvme_tcp_reconnect_or_remove()
1972 nvme_delete_ctrl(ctrl); in nvme_tcp_reconnect_or_remove()
1976 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_setup_ctrl() argument
1978 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_tcp_setup_ctrl()
1981 ret = nvme_tcp_configure_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
1985 if (ctrl->icdoff) { in nvme_tcp_setup_ctrl()
1986 dev_err(ctrl->device, "icdoff is not supported!\n"); in nvme_tcp_setup_ctrl()
1990 if (opts->queue_size > ctrl->sqsize + 1) in nvme_tcp_setup_ctrl()
1991 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
1992 "queue_size %zu > ctrl sqsize %u, clamping down\n", in nvme_tcp_setup_ctrl()
1993 opts->queue_size, ctrl->sqsize + 1); in nvme_tcp_setup_ctrl()
1995 if (ctrl->sqsize + 1 > ctrl->maxcmd) { in nvme_tcp_setup_ctrl()
1996 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
1997 "sqsize %u > ctrl maxcmd %u, clamping down\n", in nvme_tcp_setup_ctrl()
1998 ctrl->sqsize + 1, ctrl->maxcmd); in nvme_tcp_setup_ctrl()
1999 ctrl->sqsize = ctrl->maxcmd - 1; in nvme_tcp_setup_ctrl()
2002 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2003 ret = nvme_tcp_configure_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2008 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { in nvme_tcp_setup_ctrl()
2010 * state change failure is ok if we started ctrl delete, in nvme_tcp_setup_ctrl()
2014 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_tcp_setup_ctrl()
2015 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_tcp_setup_ctrl()
2017 ret = -EINVAL; in nvme_tcp_setup_ctrl()
2021 nvme_start_ctrl(ctrl); in nvme_tcp_setup_ctrl()
2025 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2026 nvme_stop_queues(ctrl); in nvme_tcp_setup_ctrl()
2027 nvme_sync_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2028 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2029 nvme_cancel_tagset(ctrl); in nvme_tcp_setup_ctrl()
2030 nvme_tcp_destroy_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2033 nvme_stop_admin_queue(ctrl); in nvme_tcp_setup_ctrl()
2034 blk_sync_queue(ctrl->admin_q); in nvme_tcp_setup_ctrl()
2035 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_setup_ctrl()
2036 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_setup_ctrl()
2037 nvme_tcp_destroy_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2045 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_reconnect_ctrl_work() local
2047 ++ctrl->nr_reconnects; in nvme_tcp_reconnect_ctrl_work()
2049 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_tcp_reconnect_ctrl_work()
2052 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", in nvme_tcp_reconnect_ctrl_work()
2053 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2055 ctrl->nr_reconnects = 0; in nvme_tcp_reconnect_ctrl_work()
2060 dev_info(ctrl->device, "Failed reconnect attempt %d\n", in nvme_tcp_reconnect_ctrl_work()
2061 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2062 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_reconnect_ctrl_work()
2069 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_error_recovery_work() local
2071 nvme_stop_keep_alive(ctrl); in nvme_tcp_error_recovery_work()
2072 nvme_tcp_teardown_io_queues(ctrl, false); in nvme_tcp_error_recovery_work()
2074 nvme_start_queues(ctrl); in nvme_tcp_error_recovery_work()
2075 nvme_tcp_teardown_admin_queue(ctrl, false); in nvme_tcp_error_recovery_work()
2076 nvme_start_admin_queue(ctrl); in nvme_tcp_error_recovery_work()
2078 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_error_recovery_work()
2079 /* state change failure is ok if we started ctrl delete */ in nvme_tcp_error_recovery_work()
2080 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_tcp_error_recovery_work()
2081 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_tcp_error_recovery_work()
2085 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_error_recovery_work()
2088 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) in nvme_tcp_teardown_ctrl() argument
2090 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_teardown_ctrl()
2091 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); in nvme_tcp_teardown_ctrl()
2093 nvme_tcp_teardown_io_queues(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2094 nvme_stop_admin_queue(ctrl); in nvme_tcp_teardown_ctrl()
2096 nvme_shutdown_ctrl(ctrl); in nvme_tcp_teardown_ctrl()
2098 nvme_disable_ctrl(ctrl); in nvme_tcp_teardown_ctrl()
2099 nvme_tcp_teardown_admin_queue(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2102 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_delete_ctrl() argument
2104 nvme_tcp_teardown_ctrl(ctrl, true); in nvme_tcp_delete_ctrl()
2109 struct nvme_ctrl *ctrl = in nvme_reset_ctrl_work() local
2112 nvme_stop_ctrl(ctrl); in nvme_reset_ctrl_work()
2113 nvme_tcp_teardown_ctrl(ctrl, false); in nvme_reset_ctrl_work()
2115 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_ctrl_work()
2116 /* state change failure is ok if we started ctrl delete */ in nvme_reset_ctrl_work()
2117 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_reset_ctrl_work()
2118 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_reset_ctrl_work()
2122 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_reset_ctrl_work()
2128 ++ctrl->nr_reconnects; in nvme_reset_ctrl_work()
2129 nvme_tcp_reconnect_or_remove(ctrl); in nvme_reset_ctrl_work()
2134 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_ctrl() local
2136 if (list_empty(&ctrl->list)) in nvme_tcp_free_ctrl()
2140 list_del(&ctrl->list); in nvme_tcp_free_ctrl()
2143 nvmf_free_options(nctrl->opts); in nvme_tcp_free_ctrl()
2145 kfree(ctrl->queues); in nvme_tcp_free_ctrl()
2146 kfree(ctrl); in nvme_tcp_free_ctrl()
2151 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_null()
2153 sg->addr = 0; in nvme_tcp_set_sg_null()
2154 sg->length = 0; in nvme_tcp_set_sg_null()
2155 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_null()
2162 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_inline()
2164 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2165 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_inline()
2166 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_tcp_set_sg_inline()
2172 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_host_data()
2174 sg->addr = 0; in nvme_tcp_set_sg_host_data()
2175 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_host_data()
2176 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_host_data()
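
The three helpers above differ only in the SGL descriptor they emit: an offset-addressed data block for in-capsule writes, and a transport-specific descriptor (or its zero-length null form) for data carried in separate PDUs, with the descriptor type in the high nibble of the type byte and the sub-type in the low nibble. A one-liner to print the composed values; the enum values are assumptions taken from include/linux/nvme.h:

#include <stdio.h>

/* assumed values, per include/linux/nvme.h */
#define NVME_SGL_FMT_DATA_DESC        0x4
#define NVME_TRANSPORT_SGL_DATA_DESC  0x5
#define NVME_SGL_FMT_OFFSET           0x1
#define NVME_SGL_FMT_TRANSPORT_A      0xA

int main(void)
{
    printf("inline (in-capsule) type byte: 0x%02x\n",
           (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET);
    printf("host-data/null type byte:      0x%02x\n",
           (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A);
    return 0;
}
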
2182 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); in nvme_tcp_submit_async_event() local
2183 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event()
2184 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; in nvme_tcp_submit_async_event()
2185 struct nvme_command *cmd = &pdu->cmd; in nvme_tcp_submit_async_event()
2189 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_submit_async_event()
2190 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2191 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_submit_async_event()
2192 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_submit_async_event()
2193 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); in nvme_tcp_submit_async_event()
2195 cmd->common.opcode = nvme_admin_async_event; in nvme_tcp_submit_async_event()
2196 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_tcp_submit_async_event()
2197 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_submit_async_event()
2200 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_submit_async_event()
2201 ctrl->async_req.offset = 0; in nvme_tcp_submit_async_event()
2202 ctrl->async_req.curr_bio = NULL; in nvme_tcp_submit_async_event()
2203 ctrl->async_req.data_len = 0; in nvme_tcp_submit_async_event()
2205 nvme_tcp_queue_request(&ctrl->async_req, true, true); in nvme_tcp_submit_async_event()
2211 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out() local
2213 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2215 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; in nvme_tcp_complete_timed_out()
2224 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout() local
2225 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_timeout()
2227 dev_warn(ctrl->device, in nvme_tcp_timeout()
2229 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); in nvme_tcp_timeout()
2231 if (ctrl->state != NVME_CTRL_LIVE) { in nvme_tcp_timeout()
2236 * - ctrl disable/shutdown fabrics requests in nvme_tcp_timeout()
2237 * - connect requests in nvme_tcp_timeout()
2238 * - initialization admin requests in nvme_tcp_timeout()
2239 * - I/O requests that entered after unquiescing and in nvme_tcp_timeout()
2253 nvme_tcp_error_recovery(ctrl); in nvme_tcp_timeout()
2261 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_map_data()
2262 struct nvme_command *c = &pdu->cmd; in nvme_tcp_map_data()
2264 c->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_map_data()
2269 req->data_len <= nvme_tcp_inline_data_size(queue)) in nvme_tcp_map_data()
2270 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2272 nvme_tcp_set_sg_host_data(c, req->data_len); in nvme_tcp_map_data()
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
	if (unlikely(ret))
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;
	else if (req->curr_bio)
		nvme_tcp_init_iter(req, READ);

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}
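/*
 * ->commit_rqs() is invoked by blk-mq to flush a dispatch batch whose
 * final request was not marked bd->last, so kick io_work here to make
 * sure the already-queued requests actually get sent.
 */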
static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
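/*
 * ->queue_rq() entry point: verify that the queue can accept the command,
 * build the command PDU, start the request, and hand it to the send path.
 */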
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}
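/*
 * Distribute hardware contexts over the tag set maps. With dedicated
 * write queues, the read map is offset past the default (write) queues;
 * otherwise both maps share the same queues. Poll queues, when
 * configured, sit after both.
 */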
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
				ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}
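/*
 * ->poll() for polled hardware contexts: busy-loop on the socket while
 * the receive queue is empty, then reap whatever completions arrived.
 * Returns the number of completions processed on this queue.
 */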
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}
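/*
 * Walk the global controller list and report whether a controller with
 * matching IP options already exists; used to reject duplicate connects.
 */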
static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}
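/*
 * Allocate and initialize a TCP controller for a fabrics connect request:
 * parse the transport addresses, reject duplicates, allocate the queue
 * array, then register the controller and kick off the initial setup.
 */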
static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}
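/*
 * Module init: allocate the driver workqueue and register the "tcp"
 * transport with the fabrics layer.
 */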
static int __init nvme_tcp_init_module(void)
{
	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}
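/*
 * Module exit: unregister the transport first so no new controllers can
 * be created, then delete any controllers that are still around.
 */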
static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");