Lines Matching +full:ctrl +full:- +full:len

1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
26 * A non-zero value being sufficient to indicate general consideration of any
36 * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
37 * because dependencies are tracked for both nvme-tcp and user contexts. Using
38 * a separate class prevents lockdep from conflating nvme-tcp socket use with
39 * user-space socket API use.
46 struct sock *sk = sock->sk; in nvme_tcp_reclassify_socket()
51 switch (sk->sk_family) { in nvme_tcp_reclassify_socket()
53 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME", in nvme_tcp_reclassify_socket()
55 "sk_lock-AF_INET-NVME", in nvme_tcp_reclassify_socket()
59 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME", in nvme_tcp_reclassify_socket()
61 "sk_lock-AF_INET6-NVME", in nvme_tcp_reclassify_socket()
139 struct nvme_tcp_ctrl *ctrl; member
167 struct nvme_ctrl ctrl; member
182 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) in to_tcp_ctrl() argument
184 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); in to_tcp_ctrl()
189 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
197 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
198 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
203 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
208 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
213 return req->pdu; in nvme_tcp_req_cmd_pdu()
219 return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) - in nvme_tcp_req_data_pdu()
225 if (nvme_is_fabrics(req->req.cmd)) in nvme_tcp_inline_data_size()
227 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
232 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
244 return rq_data_dir(rq) == WRITE && req->data_len && in nvme_tcp_has_inline_data()
245 req->data_len <= nvme_tcp_inline_data_size(req); in nvme_tcp_has_inline_data()
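
For the inline-data test above, the command capsule is negotiated as ioccsz * 16 bytes (line 1567 further down) and a write is sent inline only if its payload fits after the 64-byte nvme_command. A runnable sketch of the arithmetic, with an assumed ioccsz:

#include <stdio.h>
#include <stdbool.h>

#define NVME_CMD_SIZE 64u	/* sizeof(struct nvme_command) */

/* Mirrors nvme_tcp_inline_data_size() for non-fabrics commands. */
static unsigned inline_data_size(unsigned ioccsz)
{
	unsigned capsule_len = ioccsz * 16;	/* queue->cmnd_capsule_len */
	return capsule_len - NVME_CMD_SIZE;
}

int main(void)
{
	unsigned ioccsz = 516;		/* assumed value: 516 * 16 = 8256 bytes */
	unsigned data_len = 4096;	/* a one-page write */
	bool inline_ok = data_len <= inline_data_size(ioccsz);

	printf("inline budget %u bytes, 4k write inline: %s\n",
	       inline_data_size(ioccsz), inline_ok ? "yes" : "no");
	return 0;
}
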
250 return req->iter.bvec->bv_page; in nvme_tcp_req_cur_page()
255 return req->iter.bvec->bv_offset + req->iter.iov_offset; in nvme_tcp_req_cur_offset()
260 return min_t(size_t, iov_iter_single_seg_count(&req->iter), in nvme_tcp_req_cur_length()
261 req->pdu_len - req->pdu_sent); in nvme_tcp_req_cur_length()
267 req->pdu_len - req->pdu_sent : 0; in nvme_tcp_pdu_data_left()
271 int len) in nvme_tcp_pdu_last_send() argument
273 return nvme_tcp_pdu_data_left(req) <= len; in nvme_tcp_pdu_last_send()
285 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { in nvme_tcp_init_iter()
286 vec = &rq->special_vec; in nvme_tcp_init_iter()
291 struct bio *bio = req->curr_bio; in nvme_tcp_init_iter()
295 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in nvme_tcp_init_iter()
300 size = bio->bi_iter.bi_size; in nvme_tcp_init_iter()
301 offset = bio->bi_iter.bi_bvec_done; in nvme_tcp_init_iter()
304 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size); in nvme_tcp_init_iter()
305 req->iter.iov_offset = offset; in nvme_tcp_init_iter()
309 int len) in nvme_tcp_advance_req() argument
311 req->data_sent += len; in nvme_tcp_advance_req()
312 req->pdu_sent += len; in nvme_tcp_advance_req()
313 iov_iter_advance(&req->iter, len); in nvme_tcp_advance_req()
314 if (!iov_iter_count(&req->iter) && in nvme_tcp_advance_req()
315 req->data_sent < req->data_len) { in nvme_tcp_advance_req()
316 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_advance_req()
333 return !list_empty(&queue->send_list) || in nvme_tcp_queue_more()
334 !llist_empty(&queue->req_list); in nvme_tcp_queue_more()
340 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request()
343 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
344 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
351 if (queue->io_cpu == raw_smp_processor_id() && in nvme_tcp_queue_request()
352 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
354 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
358 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
366 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
368 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
377 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
381 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
387 list_del(&req->entry); in nvme_tcp_fetch_request()
399 struct page *page, off_t off, size_t len) in nvme_tcp_ddgst_update() argument
404 sg_set_page(&sg, page, len, off); in nvme_tcp_ddgst_update()
405 ahash_request_set_crypt(hash, &sg, NULL, len); in nvme_tcp_ddgst_update()
410 void *pdu, size_t len) in nvme_tcp_hdgst() argument
414 sg_init_one(&sg, pdu, len); in nvme_tcp_hdgst()
415 ahash_request_set_crypt(hash, &sg, pdu + len, len); in nvme_tcp_hdgst()
426 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { in nvme_tcp_verify_hdgst()
427 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
430 return -EPROTO; in nvme_tcp_verify_hdgst()
433 recv_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
434 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
435 exp_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
437 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
440 return -EIO; in nvme_tcp_verify_hdgst()
450 u32 len; in nvme_tcp_check_ddgst() local
452 len = le32_to_cpu(hdr->plen) - hdr->hlen - in nvme_tcp_check_ddgst()
453 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0); in nvme_tcp_check_ddgst()
455 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { in nvme_tcp_check_ddgst()
456 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
459 return -EPROTO; in nvme_tcp_check_ddgst()
461 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
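
The length computed above is everything that follows the PDU header and its optional digest, i.e. the data payload plus the trailing data digest when present; if that is non-zero while NVME_TCP_F_DDGST is clear, the PDU violates the negotiated digest settings. A standalone sketch of the same check, with an assumed 24-byte data PDU header:

#include <stdio.h>

#define F_HDGST 0x02	/* NVME_TCP_F_HDGST */
#define F_DDGST 0x04	/* NVME_TCP_F_DDGST */
#define DIGEST_LEN 4u	/* NVME_TCP_DIGEST_LENGTH (crc32c) */

/* Bytes following the header and its digest: data plus any data digest. */
static unsigned payload_after_header(unsigned plen, unsigned hlen, unsigned flags)
{
	return plen - hlen - ((flags & F_HDGST) ? DIGEST_LEN : 0);
}

int main(void)
{
	/* Assumed C2H data PDU: 24-byte header, both digests, 4096 data bytes. */
	unsigned flags = F_HDGST | F_DDGST;
	unsigned plen = 24 + DIGEST_LEN + 4096 + DIGEST_LEN;
	unsigned rem = payload_after_header(plen, 24, flags);

	if (rem && !(flags & F_DDGST))
		printf("protocol error: payload present but DDGST flag missing\n");
	else
		printf("%u payload bytes follow the header\n", rem); /* 4100 */
	return 0;
}
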
471 page_frag_free(req->pdu); in nvme_tcp_exit_request()
478 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); in nvme_tcp_init_request() local
481 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_tcp_init_request()
482 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request()
485 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
488 if (!req->pdu) in nvme_tcp_init_request()
489 return -ENOMEM; in nvme_tcp_init_request()
491 pdu = req->pdu; in nvme_tcp_init_request()
492 req->queue = queue; in nvme_tcp_init_request()
493 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_tcp_init_request()
494 nvme_req(rq)->cmd = &pdu->cmd; in nvme_tcp_init_request()
502 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data); in nvme_tcp_init_hctx() local
503 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx()
505 hctx->driver_data = queue; in nvme_tcp_init_hctx()
512 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data); in nvme_tcp_init_admin_hctx() local
513 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx()
515 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
522 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
523 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
529 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
531 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
532 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
533 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
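
nvme_tcp_recv_state() above encodes the receive state machine: header bytes are consumed while pdu_remaining is non-zero, the trailing digest while ddgst_remaining is non-zero, and payload data otherwise. A compact userspace rendering of that priority order:

#include <stdio.h>

enum recv_state { RECV_PDU, RECV_DATA, RECV_DDGST };

struct demo_queue {
	unsigned pdu_remaining;
	unsigned ddgst_remaining;
};

/* Same ternary as nvme_tcp_recv_state(): header first, then digest,
 * falling back to data. */
static enum recv_state recv_state(const struct demo_queue *q)
{
	return q->pdu_remaining ? RECV_PDU :
	       q->ddgst_remaining ? RECV_DDGST : RECV_DATA;
}

int main(void)
{
	struct demo_queue q = { .pdu_remaining = 24, .ddgst_remaining = 0 };

	printf("%d\n", recv_state(&q));	/* 0: RECV_PDU until the header is in */
	q.pdu_remaining = 0;
	printf("%d\n", recv_state(&q));	/* 1: then RECV_DATA */
	q.ddgst_remaining = 4;
	printf("%d\n", recv_state(&q));	/* 2: then RECV_DDGST after the data */
	return 0;
}
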
536 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) in nvme_tcp_error_recovery() argument
538 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_tcp_error_recovery()
541 dev_warn(ctrl->device, "starting error recovery\n"); in nvme_tcp_error_recovery()
542 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_error_recovery()
551 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
553 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
555 cqe->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_process_nvme_cqe()
556 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
557 return -EINVAL; in nvme_tcp_process_nvme_cqe()
561 if (req->status == cpu_to_le16(NVME_SC_SUCCESS)) in nvme_tcp_process_nvme_cqe()
562 req->status = cqe->status; in nvme_tcp_process_nvme_cqe()
564 if (!nvme_try_complete_req(rq, req->status, cqe->result)) in nvme_tcp_process_nvme_cqe()
566 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
576 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
578 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
580 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_c2h_data()
581 return -ENOENT; in nvme_tcp_handle_c2h_data()
585 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
587 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
588 return -EIO; in nvme_tcp_handle_c2h_data()
591 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
593 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && in nvme_tcp_handle_c2h_data()
594 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { in nvme_tcp_handle_c2h_data()
595 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
597 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
598 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
599 return -EPROTO; in nvme_tcp_handle_c2h_data()
608 struct nvme_completion *cqe = &pdu->cqe; in nvme_tcp_handle_comp()
618 cqe->command_id))) in nvme_tcp_handle_comp()
619 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
620 &cqe->result); in nvme_tcp_handle_comp()
630 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu()
632 u32 h2cdata_sent = req->pdu_len; in nvme_tcp_setup_h2c_data_pdu()
636 req->state = NVME_TCP_SEND_H2C_PDU; in nvme_tcp_setup_h2c_data_pdu()
637 req->offset = 0; in nvme_tcp_setup_h2c_data_pdu()
638 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); in nvme_tcp_setup_h2c_data_pdu()
639 req->pdu_sent = 0; in nvme_tcp_setup_h2c_data_pdu()
640 req->h2cdata_left -= req->pdu_len; in nvme_tcp_setup_h2c_data_pdu()
641 req->h2cdata_offset += h2cdata_sent; in nvme_tcp_setup_h2c_data_pdu()
644 data->hdr.type = nvme_tcp_h2c_data; in nvme_tcp_setup_h2c_data_pdu()
645 if (!req->h2cdata_left) in nvme_tcp_setup_h2c_data_pdu()
646 data->hdr.flags = NVME_TCP_F_DATA_LAST; in nvme_tcp_setup_h2c_data_pdu()
647 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
648 data->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_h2c_data_pdu()
649 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
650 data->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_h2c_data_pdu()
651 data->hdr.hlen = sizeof(*data); in nvme_tcp_setup_h2c_data_pdu()
652 data->hdr.pdo = data->hdr.hlen + hdgst; in nvme_tcp_setup_h2c_data_pdu()
653 data->hdr.plen = in nvme_tcp_setup_h2c_data_pdu()
654 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_h2c_data_pdu()
655 data->ttag = req->ttag; in nvme_tcp_setup_h2c_data_pdu()
656 data->command_id = nvme_cid(rq); in nvme_tcp_setup_h2c_data_pdu()
657 data->data_offset = cpu_to_le32(req->h2cdata_offset); in nvme_tcp_setup_h2c_data_pdu()
658 data->data_length = cpu_to_le32(req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
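
The plen assignment above follows the NVMe/TCP rule that PLEN covers the whole PDU: header, header digest if negotiated, this PDU's slice of data (capped at the MAXH2CDATA value from the ICResp), and the trailing data digest. A runnable sketch of the computation, with assumed sizes:

#include <stdio.h>

#define DIGEST_LEN 4u	/* NVME_TCP_DIGEST_LENGTH */

int main(void)
{
	unsigned hlen = 24;		/* assumed sizeof(struct nvme_tcp_data_pdu) */
	unsigned hdgst = DIGEST_LEN;	/* header digest negotiated */
	unsigned ddgst = DIGEST_LEN;	/* data digest negotiated */
	unsigned h2cdata_left = 131072;	/* 128k left to satisfy this R2T */
	unsigned maxh2cdata = 65536;	/* from the ICResp */

	/* One H2CData PDU carries at most maxh2cdata bytes of the R2T. */
	unsigned pdu_len = h2cdata_left < maxh2cdata ? h2cdata_left : maxh2cdata;
	unsigned plen = hlen + hdgst + pdu_len + ddgst;

	printf("pdu_len=%u plen=%u, %u bytes still to send\n",
	       pdu_len, plen, h2cdata_left - pdu_len);
	return 0;
}
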
666 u32 r2t_length = le32_to_cpu(pdu->r2t_length); in nvme_tcp_handle_r2t()
667 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset); in nvme_tcp_handle_r2t()
669 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
671 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
673 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_r2t()
674 return -ENOENT; in nvme_tcp_handle_r2t()
679 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
680 "req %d r2t len is %u, probably a bug...\n", in nvme_tcp_handle_r2t()
681 rq->tag, r2t_length); in nvme_tcp_handle_r2t()
682 return -EPROTO; in nvme_tcp_handle_r2t()
685 if (unlikely(req->data_sent + r2t_length > req->data_len)) { in nvme_tcp_handle_r2t()
686 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
687 "req %d r2t len %u exceeded data len %u (%zu sent)\n", in nvme_tcp_handle_r2t()
688 rq->tag, r2t_length, req->data_len, req->data_sent); in nvme_tcp_handle_r2t()
689 return -EPROTO; in nvme_tcp_handle_r2t()
692 if (unlikely(r2t_offset < req->data_sent)) { in nvme_tcp_handle_r2t()
693 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
695 rq->tag, r2t_offset, req->data_sent); in nvme_tcp_handle_r2t()
696 return -EPROTO; in nvme_tcp_handle_r2t()
699 req->pdu_len = 0; in nvme_tcp_handle_r2t()
700 req->h2cdata_left = r2t_length; in nvme_tcp_handle_r2t()
701 req->h2cdata_offset = r2t_offset; in nvme_tcp_handle_r2t()
702 req->ttag = pdu->ttag; in nvme_tcp_handle_r2t()
715 u32 plen = le32_to_cpu(pdu->hdr.plen); in nvme_tcp_handle_c2h_term()
728 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_term()
734 fes = le16_to_cpu(pdu->fes); in nvme_tcp_handle_c2h_term()
740 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_term()
745 unsigned int *offset, size_t *len) in nvme_tcp_recv_pdu() argument
748 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
749 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
753 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
757 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
758 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
760 *len -= rcv_len; in nvme_tcp_recv_pdu()
761 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
764 hdr = queue->pdu; in nvme_tcp_recv_pdu()
765 if (unlikely(hdr->type == nvme_tcp_c2h_term)) { in nvme_tcp_recv_pdu()
770 nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
771 return -EINVAL; in nvme_tcp_recv_pdu()
774 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
775 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
781 if (queue->data_digest) { in nvme_tcp_recv_pdu()
782 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
787 switch (hdr->type) { in nvme_tcp_recv_pdu()
789 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
792 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
795 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
797 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
798 "unsupported pdu type (%d)\n", hdr->type); in nvme_tcp_recv_pdu()
799 return -EINVAL; in nvme_tcp_recv_pdu()
812 unsigned int *offset, size_t *len) in nvme_tcp_recv_data() argument
814 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
816 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
822 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
826 if (!iov_iter_count(&req->iter)) { in nvme_tcp_recv_data()
827 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_recv_data()
833 if (!req->curr_bio) { in nvme_tcp_recv_data()
834 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
836 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
838 return -EIO; in nvme_tcp_recv_data()
845 iov_iter_count(&req->iter)); in nvme_tcp_recv_data()
847 if (queue->data_digest) in nvme_tcp_recv_data()
849 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
852 &req->iter, recv_len); in nvme_tcp_recv_data()
854 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
856 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
860 *len -= recv_len; in nvme_tcp_recv_data()
862 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
865 if (!queue->data_remaining) { in nvme_tcp_recv_data()
866 if (queue->data_digest) { in nvme_tcp_recv_data()
867 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
868 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
870 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_data()
872 le16_to_cpu(req->status)); in nvme_tcp_recv_data()
873 queue->nr_cqe++; in nvme_tcp_recv_data()
883 struct sk_buff *skb, unsigned int *offset, size_t *len) in nvme_tcp_recv_ddgst() argument
885 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
886 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
887 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
888 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
895 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
897 *len -= recv_len; in nvme_tcp_recv_ddgst()
898 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
901 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
903 pdu->command_id); in nvme_tcp_recv_ddgst()
906 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR); in nvme_tcp_recv_ddgst()
908 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
910 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
911 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
914 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_ddgst()
916 pdu->command_id); in nvme_tcp_recv_ddgst()
919 nvme_tcp_end_request(rq, le16_to_cpu(req->status)); in nvme_tcp_recv_ddgst()
920 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
928 unsigned int offset, size_t len) in nvme_tcp_recv_skb() argument
930 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb()
931 size_t consumed = len; in nvme_tcp_recv_skb()
934 if (unlikely(!queue->rd_enabled)) in nvme_tcp_recv_skb()
935 return -EFAULT; in nvme_tcp_recv_skb()
937 while (len) { in nvme_tcp_recv_skb()
940 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
943 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
946 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
949 result = -EFAULT; in nvme_tcp_recv_skb()
952 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
954 queue->rd_enabled = false; in nvme_tcp_recv_skb()
955 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
969 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
970 queue = sk->sk_user_data; in nvme_tcp_data_ready()
971 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
972 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
973 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
974 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
981 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
982 queue = sk->sk_user_data; in nvme_tcp_write_space()
984 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in nvme_tcp_write_space()
985 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
987 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
994 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
995 queue = sk->sk_user_data; in nvme_tcp_state_change()
999 switch (sk->sk_state) { in nvme_tcp_state_change()
1005 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
1008 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
1010 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
1013 queue->state_change(sk); in nvme_tcp_state_change()
1015 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
1020 queue->request = NULL; in nvme_tcp_done_send_req()
1028 nvme_complete_async_event(&req->queue->ctrl->ctrl, in nvme_tcp_fail_request()
1038 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data()
1039 int req_data_len = req->data_len; in nvme_tcp_try_send_data()
1040 u32 h2cdata_left = req->h2cdata_left; in nvme_tcp_try_send_data()
1049 size_t len = nvme_tcp_req_cur_length(req); in nvme_tcp_try_send_data() local
1050 bool last = nvme_tcp_pdu_last_send(req, len); in nvme_tcp_try_send_data()
1051 int req_data_sent = req->data_sent; in nvme_tcp_try_send_data()
1054 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
1062 bvec_set_page(&bvec, page, len, offset); in nvme_tcp_try_send_data()
1063 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_data()
1064 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data()
1068 if (queue->data_digest) in nvme_tcp_try_send_data()
1069 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
1081 if (last && ret == len) { in nvme_tcp_try_send_data()
1082 if (queue->data_digest) { in nvme_tcp_try_send_data()
1083 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
1084 &req->ddgst); in nvme_tcp_try_send_data()
1085 req->state = NVME_TCP_SEND_DDGST; in nvme_tcp_try_send_data()
1086 req->offset = 0; in nvme_tcp_try_send_data()
1096 return -EAGAIN; in nvme_tcp_try_send_data()
1101 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu()
1107 int len = sizeof(*pdu) + hdgst - req->offset; in nvme_tcp_try_send_cmd_pdu() local
1115 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
1116 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
1118 bvec_set_virt(&bvec, (void *)pdu + req->offset, len); in nvme_tcp_try_send_cmd_pdu()
1119 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_cmd_pdu()
1120 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_cmd_pdu()
1124 len -= ret; in nvme_tcp_try_send_cmd_pdu()
1125 if (!len) { in nvme_tcp_try_send_cmd_pdu()
1127 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_cmd_pdu()
1128 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
1129 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
1135 req->offset += ret; in nvme_tcp_try_send_cmd_pdu()
1137 return -EAGAIN; in nvme_tcp_try_send_cmd_pdu()
1142 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu()
1147 int len = sizeof(*pdu) - req->offset + hdgst; in nvme_tcp_try_send_data_pdu() local
1150 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
1151 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
1153 if (!req->h2cdata_left) in nvme_tcp_try_send_data_pdu()
1156 bvec_set_virt(&bvec, (void *)pdu + req->offset, len); in nvme_tcp_try_send_data_pdu()
1157 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_data_pdu()
1158 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data_pdu()
1162 len -= ret; in nvme_tcp_try_send_data_pdu()
1163 if (!len) { in nvme_tcp_try_send_data_pdu()
1164 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_data_pdu()
1165 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1166 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1169 req->offset += ret; in nvme_tcp_try_send_data_pdu()
1171 return -EAGAIN; in nvme_tcp_try_send_data_pdu()
1176 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst()
1177 size_t offset = req->offset; in nvme_tcp_try_send_ddgst()
1178 u32 h2cdata_left = req->h2cdata_left; in nvme_tcp_try_send_ddgst()
1182 .iov_base = (u8 *)&req->ddgst + req->offset, in nvme_tcp_try_send_ddgst()
1183 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset in nvme_tcp_try_send_ddgst()
1191 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1203 req->offset += ret; in nvme_tcp_try_send_ddgst()
1204 return -EAGAIN; in nvme_tcp_try_send_ddgst()
1213 if (!queue->request) { in nvme_tcp_try_send()
1214 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1215 if (!queue->request) in nvme_tcp_try_send()
1218 req = queue->request; in nvme_tcp_try_send()
1221 if (req->state == NVME_TCP_SEND_CMD_PDU) { in nvme_tcp_try_send()
1229 if (req->state == NVME_TCP_SEND_H2C_PDU) { in nvme_tcp_try_send()
1235 if (req->state == NVME_TCP_SEND_DATA) { in nvme_tcp_try_send()
1241 if (req->state == NVME_TCP_SEND_DDGST) in nvme_tcp_try_send()
1244 if (ret == -EAGAIN) { in nvme_tcp_try_send()
1247 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1249 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1259 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1260 struct sock *sk = sock->sk; in nvme_tcp_try_recv()
1267 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1268 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); in nvme_tcp_try_recv()
1283 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1285 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1298 if (!pending || !queue->rd_enabled) in nvme_tcp_io_work()
1303 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1308 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1310 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1311 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1323 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1324 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1326 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1328 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1329 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1331 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1335 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1338 return -ENOMEM; in nvme_tcp_alloc_crypto()
1341 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_free_async_req() argument
1343 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_free_async_req()
1345 page_frag_free(async->pdu); in nvme_tcp_free_async_req()
1348 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_alloc_async_req() argument
1350 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1351 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_alloc_async_req()
1354 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1357 if (!async->pdu) in nvme_tcp_alloc_async_req()
1358 return -ENOMEM; in nvme_tcp_alloc_async_req()
1360 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1367 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_queue() local
1368 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue()
1371 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1374 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1377 if (queue->pf_cache.va) { in nvme_tcp_free_queue()
1378 page = virt_to_head_page(queue->pf_cache.va); in nvme_tcp_free_queue()
1379 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); in nvme_tcp_free_queue()
1380 queue->pf_cache.va = NULL; in nvme_tcp_free_queue()
1384 sock_release(queue->sock); in nvme_tcp_free_queue()
1387 kfree(queue->pdu); in nvme_tcp_free_queue()
1388 mutex_destroy(&queue->send_mutex); in nvme_tcp_free_queue()
1389 mutex_destroy(&queue->queue_lock); in nvme_tcp_free_queue()
1404 return -ENOMEM; in nvme_tcp_init_connection()
1408 ret = -ENOMEM; in nvme_tcp_init_connection()
1412 icreq->hdr.type = nvme_tcp_icreq; in nvme_tcp_init_connection()
1413 icreq->hdr.hlen = sizeof(*icreq); in nvme_tcp_init_connection()
1414 icreq->hdr.pdo = 0; in nvme_tcp_init_connection()
1415 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); in nvme_tcp_init_connection()
1416 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); in nvme_tcp_init_connection()
1417 icreq->maxr2t = 0; /* single inflight r2t supported */ in nvme_tcp_init_connection()
1418 icreq->hpda = 0; /* no alignment constraint */ in nvme_tcp_init_connection()
1419 if (queue->hdr_digest) in nvme_tcp_init_connection()
1420 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; in nvme_tcp_init_connection()
1421 if (queue->data_digest) in nvme_tcp_init_connection()
1422 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; in nvme_tcp_init_connection()
1426 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1433 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1438 ret = -EINVAL; in nvme_tcp_init_connection()
1439 if (icresp->hdr.type != nvme_tcp_icresp) { in nvme_tcp_init_connection()
1441 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1445 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { in nvme_tcp_init_connection()
1447 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1451 if (icresp->pfv != NVME_TCP_PFV_1_0) { in nvme_tcp_init_connection()
1453 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1457 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvme_tcp_init_connection()
1458 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1459 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1460 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1462 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1467 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvme_tcp_init_connection()
1468 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1469 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1470 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1472 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1477 if (icresp->cpda != 0) { in nvme_tcp_init_connection()
1479 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
1483 maxh2cdata = le32_to_cpu(icresp->maxdata); in nvme_tcp_init_connection()
1489 queue->maxh2cdata = maxh2cdata; in nvme_tcp_init_connection()
1506 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue() local
1510 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_default_queue()
1515 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue() local
1520 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_read_queue()
1521 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_read_queue()
1526 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue() local
1532 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_poll_queue()
1533 ctrl->io_queues[HCTX_TYPE_READ] + in nvme_tcp_poll_queue()
1534 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_tcp_poll_queue()
1539 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu() local
1544 n = qid - 1; in nvme_tcp_set_queue_io_cpu()
1546 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; in nvme_tcp_set_queue_io_cpu()
1548 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - in nvme_tcp_set_queue_io_cpu()
1549 ctrl->io_queues[HCTX_TYPE_READ] - 1; in nvme_tcp_set_queue_io_cpu()
1550 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_set_queue_io_cpu()
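
In the qid-to-CPU mapping above, n is the queue's index within its own class (default, read, or poll), so each class starts again at the first online CPU and wraps when queues outnumber CPUs. A userspace sketch of the effective mapping, assuming contiguous online CPUs and assumed per-class counts:

#include <stdio.h>

/* Per-class queue counts, as in ctrl->io_queues[] (assumed values). */
static unsigned def_q = 4, read_q = 4;

/* qid 0 is the admin queue; I/O qids start at 1. */
static unsigned io_cpu(unsigned qid, unsigned nr_cpus)
{
	unsigned n;

	if (qid <= def_q)			/* default (write) queues */
		n = qid - 1;
	else if (qid <= def_q + read_q)		/* read queues restart at 0 */
		n = qid - def_q - 1;
	else					/* poll queues restart again */
		n = qid - def_q - read_q - 1;

	return n % nr_cpus;	/* cpumask_next_wrap() with contiguous CPUs */
}

int main(void)
{
	for (unsigned qid = 1; qid <= 8; qid++)
		printf("qid %u -> cpu %u\n", qid, io_cpu(qid, 4));
	return 0;
}
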
1555 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_queue() local
1556 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue()
1559 mutex_init(&queue->queue_lock); in nvme_tcp_alloc_queue()
1560 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1561 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1562 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1563 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1564 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1567 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1569 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1572 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, in nvme_tcp_alloc_queue()
1573 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1575 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1580 nvme_tcp_reclassify_socket(queue->sock); in nvme_tcp_alloc_queue()
1583 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1586 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1593 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1596 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1599 if (nctrl->opts->tos >= 0) in nvme_tcp_alloc_queue()
1600 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1603 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1605 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1606 queue->sock->sk->sk_use_task_frag = false; in nvme_tcp_alloc_queue()
1608 queue->request = NULL; in nvme_tcp_alloc_queue()
1609 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1610 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1611 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1612 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1613 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1615 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_alloc_queue()
1616 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1617 sizeof(ctrl->src_addr)); in nvme_tcp_alloc_queue()
1619 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1626 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) { in nvme_tcp_alloc_queue()
1627 char *iface = nctrl->opts->host_iface; in nvme_tcp_alloc_queue()
1630 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, in nvme_tcp_alloc_queue()
1633 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1640 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1641 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1642 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1645 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1653 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1654 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1655 ret = -ENOMEM; in nvme_tcp_alloc_queue()
1659 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1662 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1663 sizeof(ctrl->addr), 0); in nvme_tcp_alloc_queue()
1665 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1674 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1679 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1681 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1683 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1686 sock_release(queue->sock); in nvme_tcp_alloc_queue()
1687 queue->sock = NULL; in nvme_tcp_alloc_queue()
1689 mutex_destroy(&queue->send_mutex); in nvme_tcp_alloc_queue()
1690 mutex_destroy(&queue->queue_lock); in nvme_tcp_alloc_queue()
1696 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_ops()
1698 write_lock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_ops()
1699 sock->sk->sk_user_data = NULL; in nvme_tcp_restore_sock_ops()
1700 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_ops()
1701 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_ops()
1702 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_ops()
1703 write_unlock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_ops()
1708 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1710 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1715 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_stop_queue_nowait() local
1716 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue_nowait()
1718 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_stop_queue_nowait()
1721 mutex_lock(&queue->queue_lock); in nvme_tcp_stop_queue_nowait()
1722 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue_nowait()
1724 mutex_unlock(&queue->queue_lock); in nvme_tcp_stop_queue_nowait()
1729 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_wait_queue() local
1730 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_wait_queue()
1734 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) || in nvme_tcp_wait_queue()
1735 !sk_wmem_alloc_get(queue->sock->sk)) in nvme_tcp_wait_queue()
1738 timeout -= 2; in nvme_tcp_wait_queue()
1740 dev_warn(nctrl->device, in nvme_tcp_wait_queue()
1754 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1755 queue->sock->sk->sk_user_data = queue; in nvme_tcp_setup_sock_ops()
1756 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_setup_sock_ops()
1757 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_setup_sock_ops()
1758 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_setup_sock_ops()
1759 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_setup_sock_ops()
1760 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_setup_sock_ops()
1761 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_setup_sock_ops()
1763 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_setup_sock_ops()
1765 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1770 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_start_queue() local
1771 struct nvme_tcp_queue *queue = &ctrl->queues[idx]; in nvme_tcp_start_queue()
1774 queue->rd_enabled = true; in nvme_tcp_start_queue()
1784 set_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_start_queue()
1786 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_start_queue()
1788 dev_err(nctrl->device, in nvme_tcp_start_queue()
1794 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_free_admin_queue() argument
1796 if (to_tcp_ctrl(ctrl)->async_req.pdu) { in nvme_tcp_free_admin_queue()
1797 cancel_work_sync(&ctrl->async_event_work); in nvme_tcp_free_admin_queue()
1798 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_free_admin_queue()
1799 to_tcp_ctrl(ctrl)->async_req.pdu = NULL; in nvme_tcp_free_admin_queue()
1802 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_free_admin_queue()
1805 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_free_io_queues() argument
1809 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_free_io_queues()
1810 nvme_tcp_free_queue(ctrl, i); in nvme_tcp_free_io_queues()
1813 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_stop_io_queues() argument
1817 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_stop_io_queues()
1818 nvme_tcp_stop_queue_nowait(ctrl, i); in nvme_tcp_stop_io_queues()
1819 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_stop_io_queues()
1820 nvme_tcp_wait_queue(ctrl, i); in nvme_tcp_stop_io_queues()
1823 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_start_io_queues() argument
1829 ret = nvme_tcp_start_queue(ctrl, i); in nvme_tcp_start_io_queues()
1837 for (i--; i >= first; i--) in nvme_tcp_start_io_queues()
1838 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_start_io_queues()
1842 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_admin_queue() argument
1846 ret = nvme_tcp_alloc_queue(ctrl, 0); in nvme_tcp_alloc_admin_queue()
1850 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_alloc_admin_queue()
1857 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_alloc_admin_queue()
1861 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in __nvme_tcp_alloc_io_queues() argument
1865 for (i = 1; i < ctrl->queue_count; i++) { in __nvme_tcp_alloc_io_queues()
1866 ret = nvme_tcp_alloc_queue(ctrl, i); in __nvme_tcp_alloc_io_queues()
1874 for (i--; i >= 1; i--) in __nvme_tcp_alloc_io_queues()
1875 nvme_tcp_free_queue(ctrl, i); in __nvme_tcp_alloc_io_queues()
1880 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_io_queues() argument
1885 nr_io_queues = nvmf_nr_io_queues(ctrl->opts); in nvme_tcp_alloc_io_queues()
1886 ret = nvme_set_queue_count(ctrl, &nr_io_queues); in nvme_tcp_alloc_io_queues()
1891 dev_err(ctrl->device, in nvme_tcp_alloc_io_queues()
1893 return -ENOMEM; in nvme_tcp_alloc_io_queues()
1896 ctrl->queue_count = nr_io_queues + 1; in nvme_tcp_alloc_io_queues()
1897 dev_info(ctrl->device, in nvme_tcp_alloc_io_queues()
1900 nvmf_set_io_queues(ctrl->opts, nr_io_queues, in nvme_tcp_alloc_io_queues()
1901 to_tcp_ctrl(ctrl)->io_queues); in nvme_tcp_alloc_io_queues()
1902 return __nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1905 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_io_queues() argument
1907 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1909 nvme_remove_io_tag_set(ctrl); in nvme_tcp_destroy_io_queues()
1910 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1913 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_io_queues() argument
1917 ret = nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1922 ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set, in nvme_tcp_configure_io_queues()
1924 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2, in nvme_tcp_configure_io_queues()
1935 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count); in nvme_tcp_configure_io_queues()
1936 ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues); in nvme_tcp_configure_io_queues()
1941 nvme_start_freeze(ctrl); in nvme_tcp_configure_io_queues()
1942 nvme_unquiesce_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1943 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { in nvme_tcp_configure_io_queues()
1949 ret = -ENODEV; in nvme_tcp_configure_io_queues()
1950 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
1953 blk_mq_update_nr_hw_queues(ctrl->tagset, in nvme_tcp_configure_io_queues()
1954 ctrl->queue_count - 1); in nvme_tcp_configure_io_queues()
1955 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
1962 ret = nvme_tcp_start_io_queues(ctrl, nr_queues, in nvme_tcp_configure_io_queues()
1963 ctrl->tagset->nr_hw_queues + 1); in nvme_tcp_configure_io_queues()
1970 nvme_quiesce_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1971 nvme_sync_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1972 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1974 nvme_cancel_tagset(ctrl); in nvme_tcp_configure_io_queues()
1976 nvme_remove_io_tag_set(ctrl); in nvme_tcp_configure_io_queues()
1978 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1982 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_admin_queue() argument
1984 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_destroy_admin_queue()
1986 nvme_remove_admin_tag_set(ctrl); in nvme_tcp_destroy_admin_queue()
1987 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_destroy_admin_queue()
1990 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_admin_queue() argument
1994 error = nvme_tcp_alloc_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1999 error = nvme_alloc_admin_tag_set(ctrl, in nvme_tcp_configure_admin_queue()
2000 &to_tcp_ctrl(ctrl)->admin_tag_set, in nvme_tcp_configure_admin_queue()
2007 error = nvme_tcp_start_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
2011 error = nvme_enable_ctrl(ctrl); in nvme_tcp_configure_admin_queue()
2015 nvme_unquiesce_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2017 error = nvme_init_ctrl_finish(ctrl, false); in nvme_tcp_configure_admin_queue()
2024 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2025 blk_sync_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
2027 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
2028 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_configure_admin_queue()
2031 nvme_remove_admin_tag_set(ctrl); in nvme_tcp_configure_admin_queue()
2033 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2037 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_admin_queue() argument
2040 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
2041 blk_sync_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
2042 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_teardown_admin_queue()
2043 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_teardown_admin_queue()
2045 nvme_unquiesce_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
2046 nvme_tcp_destroy_admin_queue(ctrl, remove); in nvme_tcp_teardown_admin_queue()
2049 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_io_queues() argument
2052 if (ctrl->queue_count <= 1) in nvme_tcp_teardown_io_queues()
2054 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_teardown_io_queues()
2055 nvme_quiesce_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2056 nvme_sync_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2057 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2058 nvme_cancel_tagset(ctrl); in nvme_tcp_teardown_io_queues()
2060 nvme_unquiesce_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2061 nvme_tcp_destroy_io_queues(ctrl, remove); in nvme_tcp_teardown_io_queues()
2064 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) in nvme_tcp_reconnect_or_remove() argument
2066 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_tcp_reconnect_or_remove()
2074 if (nvmf_should_reconnect(ctrl)) { in nvme_tcp_reconnect_or_remove()
2075 dev_info(ctrl->device, "Reconnecting in %d seconds...\n", in nvme_tcp_reconnect_or_remove()
2076 ctrl->opts->reconnect_delay); in nvme_tcp_reconnect_or_remove()
2077 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, in nvme_tcp_reconnect_or_remove()
2078 ctrl->opts->reconnect_delay * HZ); in nvme_tcp_reconnect_or_remove()
2080 dev_info(ctrl->device, "Removing controller...\n"); in nvme_tcp_reconnect_or_remove()
2081 nvme_delete_ctrl(ctrl); in nvme_tcp_reconnect_or_remove()
2085 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_setup_ctrl() argument
2087 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_tcp_setup_ctrl()
2090 ret = nvme_tcp_configure_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2094 if (ctrl->icdoff) { in nvme_tcp_setup_ctrl()
2095 ret = -EOPNOTSUPP; in nvme_tcp_setup_ctrl()
2096 dev_err(ctrl->device, "icdoff is not supported!\n"); in nvme_tcp_setup_ctrl()
2100 if (!nvme_ctrl_sgl_supported(ctrl)) { in nvme_tcp_setup_ctrl()
2101 ret = -EOPNOTSUPP; in nvme_tcp_setup_ctrl()
2102 dev_err(ctrl->device, "Mandatory sgls are not supported!\n"); in nvme_tcp_setup_ctrl()
2106 if (opts->queue_size > ctrl->sqsize + 1) in nvme_tcp_setup_ctrl()
2107 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
2108 "queue_size %zu > ctrl sqsize %u, clamping down\n", in nvme_tcp_setup_ctrl()
2109 opts->queue_size, ctrl->sqsize + 1); in nvme_tcp_setup_ctrl()
2111 if (ctrl->sqsize + 1 > ctrl->maxcmd) { in nvme_tcp_setup_ctrl()
2112 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
2113 "sqsize %u > ctrl maxcmd %u, clamping down\n", in nvme_tcp_setup_ctrl()
2114 ctrl->sqsize + 1, ctrl->maxcmd); in nvme_tcp_setup_ctrl()
2115 ctrl->sqsize = ctrl->maxcmd - 1; in nvme_tcp_setup_ctrl()
2118 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2119 ret = nvme_tcp_configure_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2124 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { in nvme_tcp_setup_ctrl()
2126 * state change failure is ok if we started ctrl delete, in nvme_tcp_setup_ctrl()
2130 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_tcp_setup_ctrl()
2135 ret = -EINVAL; in nvme_tcp_setup_ctrl()
2139 nvme_start_ctrl(ctrl); in nvme_tcp_setup_ctrl()
2143 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2144 nvme_quiesce_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2145 nvme_sync_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2146 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2147 nvme_cancel_tagset(ctrl); in nvme_tcp_setup_ctrl()
2148 nvme_tcp_destroy_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2151 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_setup_ctrl()
2152 blk_sync_queue(ctrl->admin_q); in nvme_tcp_setup_ctrl()
2153 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_setup_ctrl()
2154 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_setup_ctrl()
2155 nvme_tcp_destroy_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2163 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_reconnect_ctrl_work() local
2165 ++ctrl->nr_reconnects; in nvme_tcp_reconnect_ctrl_work()
2167 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_tcp_reconnect_ctrl_work()
2170 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", in nvme_tcp_reconnect_ctrl_work()
2171 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2173 ctrl->nr_reconnects = 0; in nvme_tcp_reconnect_ctrl_work()
2178 dev_info(ctrl->device, "Failed reconnect attempt %d\n", in nvme_tcp_reconnect_ctrl_work()
2179 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2180 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_reconnect_ctrl_work()
2187 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_error_recovery_work() local
2189 nvme_stop_keep_alive(ctrl); in nvme_tcp_error_recovery_work()
2190 flush_work(&ctrl->async_event_work); in nvme_tcp_error_recovery_work()
2191 nvme_tcp_teardown_io_queues(ctrl, false); in nvme_tcp_error_recovery_work()
2193 nvme_unquiesce_io_queues(ctrl); in nvme_tcp_error_recovery_work()
2194 nvme_tcp_teardown_admin_queue(ctrl, false); in nvme_tcp_error_recovery_work()
2195 nvme_unquiesce_admin_queue(ctrl); in nvme_tcp_error_recovery_work()
2196 nvme_auth_stop(ctrl); in nvme_tcp_error_recovery_work()
2198 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_error_recovery_work()
2199 /* state change failure is ok if we started ctrl delete */ in nvme_tcp_error_recovery_work()
2200 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_tcp_error_recovery_work()
2207 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_error_recovery_work()
2210 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) in nvme_tcp_teardown_ctrl() argument
2212 nvme_tcp_teardown_io_queues(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2213 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_teardown_ctrl()
2214 nvme_disable_ctrl(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2215 nvme_tcp_teardown_admin_queue(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2218 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_delete_ctrl() argument
2220 nvme_tcp_teardown_ctrl(ctrl, true); in nvme_tcp_delete_ctrl()
2225 struct nvme_ctrl *ctrl = in nvme_reset_ctrl_work() local
2228 nvme_stop_ctrl(ctrl); in nvme_reset_ctrl_work()
2229 nvme_tcp_teardown_ctrl(ctrl, false); in nvme_reset_ctrl_work()
2231 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_ctrl_work()
2232 /* state change failure is ok if we started ctrl delete */ in nvme_reset_ctrl_work()
2233 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_reset_ctrl_work()
2240 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_reset_ctrl_work()
2246 ++ctrl->nr_reconnects; in nvme_reset_ctrl_work()
2247 nvme_tcp_reconnect_or_remove(ctrl); in nvme_reset_ctrl_work()
2250 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_stop_ctrl() argument
2252 flush_work(&to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_stop_ctrl()
2253 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); in nvme_tcp_stop_ctrl()
2258 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_ctrl() local
2260 if (list_empty(&ctrl->list)) in nvme_tcp_free_ctrl()
2264 list_del(&ctrl->list); in nvme_tcp_free_ctrl()
2267 nvmf_free_options(nctrl->opts); in nvme_tcp_free_ctrl()
2269 kfree(ctrl->queues); in nvme_tcp_free_ctrl()
2270 kfree(ctrl); in nvme_tcp_free_ctrl()
2275 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_null()
2277 sg->addr = 0; in nvme_tcp_set_sg_null()
2278 sg->length = 0; in nvme_tcp_set_sg_null()
2279 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_null()
2286 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_inline()
2288 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2289 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_inline()
2290 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_tcp_set_sg_inline()
2296 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_host_data()
2298 sg->addr = 0; in nvme_tcp_set_sg_host_data()
2299 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_host_data()
2300 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_host_data()
2306 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); in nvme_tcp_submit_async_event() local
2307 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event()
2308 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; in nvme_tcp_submit_async_event()
2309 struct nvme_command *cmd = &pdu->cmd; in nvme_tcp_submit_async_event()
2313 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_submit_async_event()
2314 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2315 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_submit_async_event()
2316 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_submit_async_event()
2317 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); in nvme_tcp_submit_async_event()
2319 cmd->common.opcode = nvme_admin_async_event; in nvme_tcp_submit_async_event()
2320 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_tcp_submit_async_event()
2321 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_submit_async_event()
2324 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_submit_async_event()
2325 ctrl->async_req.offset = 0; in nvme_tcp_submit_async_event()
2326 ctrl->async_req.curr_bio = NULL; in nvme_tcp_submit_async_event()
2327 ctrl->async_req.data_len = 0; in nvme_tcp_submit_async_event()
2329 nvme_tcp_queue_request(&ctrl->async_req, true, true); in nvme_tcp_submit_async_event()
2335 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out() local
2337 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2344 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout() local
2346 u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype; in nvme_tcp_timeout()
2347 int qid = nvme_tcp_queue_id(req->queue); in nvme_tcp_timeout()
2349 dev_warn(ctrl->device, in nvme_tcp_timeout()
2351 nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type, in nvme_tcp_timeout()
2354 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) { in nvme_tcp_timeout()
2359 * - ctrl disable/shutdown fabrics requests in nvme_tcp_timeout()
2360 * - connect requests in nvme_tcp_timeout()
2361 * - initialization admin requests in nvme_tcp_timeout()
2362 * - I/O requests that entered after unquiescing and in nvme_tcp_timeout()
2376 nvme_tcp_error_recovery(ctrl); in nvme_tcp_timeout()
2385 struct nvme_command *c = &pdu->cmd; in nvme_tcp_map_data()
2387 c->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_map_data()
2392 req->data_len <= nvme_tcp_inline_data_size(req)) in nvme_tcp_map_data()
2393 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2395 nvme_tcp_set_sg_host_data(c, req->data_len); in nvme_tcp_map_data()
2405 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_cmd_pdu()
2413 req->state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_setup_cmd_pdu()
2414 req->status = cpu_to_le16(NVME_SC_SUCCESS); in nvme_tcp_setup_cmd_pdu()
2415 req->offset = 0; in nvme_tcp_setup_cmd_pdu()
2416 req->data_sent = 0; in nvme_tcp_setup_cmd_pdu()
2417 req->pdu_len = 0; in nvme_tcp_setup_cmd_pdu()
2418 req->pdu_sent = 0; in nvme_tcp_setup_cmd_pdu()
2419 req->h2cdata_left = 0; in nvme_tcp_setup_cmd_pdu()
2420 req->data_len = blk_rq_nr_phys_segments(rq) ? in nvme_tcp_setup_cmd_pdu()
2422 req->curr_bio = rq->bio; in nvme_tcp_setup_cmd_pdu()
2423 if (req->curr_bio && req->data_len) in nvme_tcp_setup_cmd_pdu()
2427 req->data_len <= nvme_tcp_inline_data_size(req)) in nvme_tcp_setup_cmd_pdu()
2428 req->pdu_len = req->data_len; in nvme_tcp_setup_cmd_pdu()
2430 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_setup_cmd_pdu()
2431 pdu->hdr.flags = 0; in nvme_tcp_setup_cmd_pdu()
2432 if (queue->hdr_digest) in nvme_tcp_setup_cmd_pdu()
2433 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_cmd_pdu()
2434 if (queue->data_digest && req->pdu_len) { in nvme_tcp_setup_cmd_pdu()
2435 pdu->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_cmd_pdu()
2438 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_setup_cmd_pdu()
2439 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0; in nvme_tcp_setup_cmd_pdu()
2440 pdu->hdr.plen = in nvme_tcp_setup_cmd_pdu()
2441 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_cmd_pdu()
2446 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_cmd_pdu()
2456 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_commit_rqs()
2458 if (!llist_empty(&queue->req_list)) in nvme_tcp_commit_rqs()
2459 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_commit_rqs()
2465 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2466 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq()
2467 struct request *rq = bd->rq; in nvme_tcp_queue_rq()
2469 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_queue_rq()
2472 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_tcp_queue_rq()
2473 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_tcp_queue_rq()
2481 nvme_tcp_queue_request(req, true, bd->last); in nvme_tcp_queue_rq()
2488 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); in nvme_tcp_map_queues() local
2490 nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); in nvme_tcp_map_queues()
2495 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_poll()
2496 struct sock *sk = queue->sock->sk; in nvme_tcp_poll()
2499 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_poll()
2502 set_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2503 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) in nvme_tcp_poll()
2506 clear_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2507 return ret < 0 ? ret : queue->nr_cqe; in nvme_tcp_poll()
2510 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) in nvme_tcp_get_address() argument
2512 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0]; in nvme_tcp_get_address()
2514 int ret, len; in nvme_tcp_get_address() local
2516 len = nvmf_get_address(ctrl, buf, size); in nvme_tcp_get_address()
2518 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_get_address()
2519 return len; in nvme_tcp_get_address()
2521 mutex_lock(&queue->queue_lock); in nvme_tcp_get_address()
2523 ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); in nvme_tcp_get_address()
2525 if (len > 0) in nvme_tcp_get_address()
2526 len--; /* strip trailing newline */ in nvme_tcp_get_address()
2527 len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n", in nvme_tcp_get_address()
2528 (len) ? "," : "", &src_addr); in nvme_tcp_get_address()
2531 mutex_unlock(&queue->queue_lock); in nvme_tcp_get_address()
2533 return len; in nvme_tcp_get_address()
2574 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_existing_controller() local
2578 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) { in nvme_tcp_existing_controller()
2579 found = nvmf_ip_options_match(&ctrl->ctrl, opts); in nvme_tcp_existing_controller()
2591 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_create_ctrl() local
2594 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_tcp_create_ctrl()
2595 if (!ctrl) in nvme_tcp_create_ctrl()
2596 return ERR_PTR(-ENOMEM); in nvme_tcp_create_ctrl()
2598 INIT_LIST_HEAD(&ctrl->list); in nvme_tcp_create_ctrl()
2599 ctrl->ctrl.opts = opts; in nvme_tcp_create_ctrl()
2600 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + in nvme_tcp_create_ctrl()
2601 opts->nr_poll_queues + 1; in nvme_tcp_create_ctrl()
2602 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_tcp_create_ctrl()
2603 ctrl->ctrl.kato = opts->kato; in nvme_tcp_create_ctrl()
2605 INIT_DELAYED_WORK(&ctrl->connect_work, in nvme_tcp_create_ctrl()
2607 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); in nvme_tcp_create_ctrl()
2608 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); in nvme_tcp_create_ctrl()
2610 if (!(opts->mask & NVMF_OPT_TRSVCID)) { in nvme_tcp_create_ctrl()
2611 opts->trsvcid = in nvme_tcp_create_ctrl()
2613 if (!opts->trsvcid) { in nvme_tcp_create_ctrl()
2614 ret = -ENOMEM; in nvme_tcp_create_ctrl()
2617 opts->mask |= NVMF_OPT_TRSVCID; in nvme_tcp_create_ctrl()
2621 opts->traddr, opts->trsvcid, &ctrl->addr); in nvme_tcp_create_ctrl()
2624 opts->traddr, opts->trsvcid); in nvme_tcp_create_ctrl()
2628 if (opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_create_ctrl()
2630 opts->host_traddr, NULL, &ctrl->src_addr); in nvme_tcp_create_ctrl()
2633 opts->host_traddr); in nvme_tcp_create_ctrl()
2638 if (opts->mask & NVMF_OPT_HOST_IFACE) { in nvme_tcp_create_ctrl()
2639 if (!__dev_get_by_name(&init_net, opts->host_iface)) { in nvme_tcp_create_ctrl()
2641 opts->host_iface); in nvme_tcp_create_ctrl()
2642 ret = -ENODEV; in nvme_tcp_create_ctrl()
2647 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) { in nvme_tcp_create_ctrl()
2648 ret = -EALREADY; in nvme_tcp_create_ctrl()
2652 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), in nvme_tcp_create_ctrl()
2654 if (!ctrl->queues) { in nvme_tcp_create_ctrl()
2655 ret = -ENOMEM; in nvme_tcp_create_ctrl()
2659 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0); in nvme_tcp_create_ctrl()
2663 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_create_ctrl()
2665 ret = -EINTR; in nvme_tcp_create_ctrl()
2669 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true); in nvme_tcp_create_ctrl()
2673 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", in nvme_tcp_create_ctrl()
2674 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr); in nvme_tcp_create_ctrl()
2677 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); in nvme_tcp_create_ctrl()
2680 return &ctrl->ctrl; in nvme_tcp_create_ctrl()
2683 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
2684 nvme_put_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
2686 ret = -EIO; in nvme_tcp_create_ctrl()
2689 kfree(ctrl->queues); in nvme_tcp_create_ctrl()
2691 kfree(ctrl); in nvme_tcp_create_ctrl()
2721 return -ENOMEM; in nvme_tcp_init_module()
2729 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_cleanup_module() local
2734 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) in nvme_tcp_cleanup_module()
2735 nvme_delete_ctrl(&ctrl->ctrl); in nvme_tcp_cleanup_module()