Lines Matching refs:queue in drivers/nvme/target/tcp.c, the Linux NVMe-oF TCP target driver. Each entry gives the source line number, the matching line, and the enclosing function; the trailing member/argument/local tags classify how "queue" is used on that line.

56 	struct nvmet_tcp_queue		*queue;  member
158 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, in nvmet_tcp_cmd_tag() argument
161 if (unlikely(!queue->nr_cmds)) { in nvmet_tcp_cmd_tag()
166 return cmd - queue->cmds; in nvmet_tcp_cmd_tag()
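A note on the nvmet_tcp_cmd_tag() matches above: the transfer tag (ttag) is simply the command's index in the queue's flat cmds array, recovered by pointer subtraction, with the !nr_cmds branch guarding the window before the array exists. A minimal userspace sketch of that pattern follows; the names and the USHRT_MAX sentinel are illustrative assumptions, since the listing shows the guard but not its return value.

#include <limits.h>
#include <stdio.h>

struct cmd { int busy; };

struct queue {
	struct cmd *cmds;	/* array of nr_cmds commands */
	int nr_cmds;
};

static unsigned short cmd_tag(const struct queue *q, const struct cmd *c)
{
	if (!q->nr_cmds)		/* command array not allocated yet */
		return USHRT_MAX;	/* assumed sentinel tag */
	return (unsigned short)(c - q->cmds);	/* array index doubles as tag */
}

int main(void)
{
	struct cmd cmds[4];
	struct queue q = { .cmds = cmds, .nr_cmds = 4 };

	printf("tag of cmds[2] = %u\n", cmd_tag(&q, &cmds[2]));	/* prints 2 */
	return 0;
}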
194 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) in nvmet_tcp_get_cmd() argument
198 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd()
214 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd()
217 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd()
220 static inline int queue_cpu(struct nvmet_tcp_queue *queue) in queue_cpu() argument
222 return queue->sock->sk->sk_incoming_cpu; in queue_cpu()
225 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue) in nvmet_tcp_hdgst_len() argument
227 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvmet_tcp_hdgst_len()
230 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) in nvmet_tcp_ddgst_len() argument
232 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvmet_tcp_ddgst_len()
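nvmet_tcp_hdgst_len() and nvmet_tcp_ddgst_len() above return NVME_TCP_DIGEST_LENGTH or 0 depending on whether the header/data digest was negotiated on this queue, and later matches add those values into PDU sizes. A hedged sketch of how the optional digests change the on-wire length of a data-bearing PDU; NVME_TCP_DIGEST_LENGTH is the 4-byte CRC32C defined by NVMe/TCP, and the 24-byte header in the example is an assumed data-PDU header size.

#include <stdbool.h>
#include <stdio.h>

#define NVME_TCP_DIGEST_LENGTH	4	/* CRC32C, per NVMe/TCP */

static unsigned int pdu_wire_len(unsigned int hlen, unsigned int data_len,
				 bool hdr_digest, bool data_digest)
{
	unsigned int len = hlen + data_len;

	if (hdr_digest)
		len += NVME_TCP_DIGEST_LENGTH;	/* HDGST follows the header */
	if (data_len && data_digest)
		len += NVME_TCP_DIGEST_LENGTH;	/* DDGST follows the data */
	return len;
}

int main(void)
{
	/* assumed 24-byte data PDU header + 4096B payload, both digests on */
	printf("%u\n", pdu_wire_len(24, 4096, true, true));	/* 4128 */
	return 0;
}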
245 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, in nvmet_tcp_verify_hdgst() argument
254 queue->idx); in nvmet_tcp_verify_hdgst()
259 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len); in nvmet_tcp_verify_hdgst()
263 queue->idx, le32_to_cpu(recv_digest), in nvmet_tcp_verify_hdgst()
271 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) in nvmet_tcp_check_ddgst() argument
274 u8 digest_len = nvmet_tcp_hdgst_len(queue); in nvmet_tcp_check_ddgst()
281 pr_err("queue %d: data digest flag is cleared\n", queue->idx); in nvmet_tcp_check_ddgst()
328 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) in nvmet_tcp_fatal_error() argument
330 queue->rcv_state = NVMET_TCP_RECV_ERR; in nvmet_tcp_fatal_error()
331 if (queue->nvme_sq.ctrl) in nvmet_tcp_fatal_error()
332 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); in nvmet_tcp_fatal_error()
334 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_fatal_error()
337 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) in nvmet_tcp_socket_error() argument
340 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_socket_error()
342 nvmet_tcp_fatal_error(queue); in nvmet_tcp_socket_error()
410 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_c2h_data_pdu() local
411 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
412 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
418 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ? in nvmet_setup_c2h_data_pdu()
429 if (queue->data_digest) { in nvmet_setup_c2h_data_pdu()
431 nvmet_tcp_send_ddgst(queue->snd_hash, cmd); in nvmet_setup_c2h_data_pdu()
434 if (cmd->queue->hdr_digest) { in nvmet_setup_c2h_data_pdu()
436 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_c2h_data_pdu()
443 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_r2t_pdu() local
444 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_r2t_pdu()
456 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); in nvmet_setup_r2t_pdu()
459 if (cmd->queue->hdr_digest) { in nvmet_setup_r2t_pdu()
461 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_r2t_pdu()
468 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_response_pdu() local
469 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_response_pdu()
479 if (cmd->queue->hdr_digest) { in nvmet_setup_response_pdu()
481 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_response_pdu()
485 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue) in nvmet_tcp_process_resp_list() argument
490 for (node = llist_del_all(&queue->resp_list); node; node = node->next) { in nvmet_tcp_process_resp_list()
492 list_add(&cmd->entry, &queue->resp_send_list); in nvmet_tcp_process_resp_list()
493 queue->send_list_len++; in nvmet_tcp_process_resp_list()
497 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue) in nvmet_tcp_fetch_cmd() argument
499 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list, in nvmet_tcp_fetch_cmd()
501 if (!queue->snd_cmd) { in nvmet_tcp_fetch_cmd()
502 nvmet_tcp_process_resp_list(queue); in nvmet_tcp_fetch_cmd()
503 queue->snd_cmd = in nvmet_tcp_fetch_cmd()
504 list_first_entry_or_null(&queue->resp_send_list, in nvmet_tcp_fetch_cmd()
506 if (unlikely(!queue->snd_cmd)) in nvmet_tcp_fetch_cmd()
510 list_del_init(&queue->snd_cmd->entry); in nvmet_tcp_fetch_cmd()
511 queue->send_list_len--; in nvmet_tcp_fetch_cmd()
513 if (nvmet_tcp_need_data_out(queue->snd_cmd)) in nvmet_tcp_fetch_cmd()
514 nvmet_setup_c2h_data_pdu(queue->snd_cmd); in nvmet_tcp_fetch_cmd()
515 else if (nvmet_tcp_need_data_in(queue->snd_cmd)) in nvmet_tcp_fetch_cmd()
516 nvmet_setup_r2t_pdu(queue->snd_cmd); in nvmet_tcp_fetch_cmd()
518 nvmet_setup_response_pdu(queue->snd_cmd); in nvmet_tcp_fetch_cmd()
520 return queue->snd_cmd; in nvmet_tcp_fetch_cmd()
527 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_queue_response() local
531 if (unlikely(cmd == queue->cmd)) { in nvmet_tcp_queue_response()
540 if (queue->rcv_state == NVMET_TCP_RECV_PDU && in nvmet_tcp_queue_response()
546 llist_add(&cmd->lentry, &queue->resp_list); in nvmet_tcp_queue_response()
547 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); in nvmet_tcp_queue_response()
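The nvmet_tcp_queue_response() and nvmet_tcp_fetch_cmd() matches show the response path's lock-less handoff: completions push commands onto queue->resp_list with llist_add() from any context, and io_work later drains the entire list in one llist_del_all() swap into the ordinary resp_send_list it owns. A userspace sketch of that single-swap producer/consumer pattern using C11 atomics; the kernel's llist API plays this role, so this is an illustration, not the implementation.

#include <stdatomic.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

static _Atomic(struct node *) resp_list;	/* like queue->resp_list */

static void push(struct node *n)		/* like llist_add() */
{
	n->next = atomic_load(&resp_list);
	while (!atomic_compare_exchange_weak(&resp_list, &n->next, n))
		;	/* retry with the refreshed head in n->next */
}

static struct node *del_all(void)		/* like llist_del_all() */
{
	return atomic_exchange(&resp_list, NULL);
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };

	push(&a);
	push(&b);
	for (struct node *n = del_all(); n; n = n->next)
		printf("draining cmd %d\n", n->id);	/* 2, then 1 (LIFO) */
	return 0;
}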
560 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_data_pdu()
564 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu), in nvmet_try_send_data_pdu()
583 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_data() local
591 if ((!last_in_batch && cmd->queue->send_list_len) || in nvmet_try_send_data()
593 queue->data_digest || !queue->nvme_sq.sqhd_disabled) in nvmet_try_send_data()
596 ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset, in nvmet_try_send_data()
611 if (queue->data_digest) { in nvmet_try_send_data()
615 if (queue->nvme_sq.sqhd_disabled) { in nvmet_try_send_data()
616 cmd->queue->snd_cmd = NULL; in nvmet_try_send_data()
623 if (queue->nvme_sq.sqhd_disabled) { in nvmet_try_send_data()
635 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_response()
640 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_response()
645 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu), in nvmet_try_send_response()
657 cmd->queue->snd_cmd = NULL; in nvmet_try_send_response()
664 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_r2t()
669 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_r2t()
674 ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu), in nvmet_try_send_r2t()
684 cmd->queue->snd_cmd = NULL; in nvmet_try_send_r2t()
690 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_ddgst() local
699 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_ddgst()
704 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvmet_try_send_ddgst()
714 if (queue->nvme_sq.sqhd_disabled) { in nvmet_try_send_ddgst()
715 cmd->queue->snd_cmd = NULL; in nvmet_try_send_ddgst()
723 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, in nvmet_tcp_try_send_one() argument
726 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; in nvmet_tcp_try_send_one()
729 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_try_send_one()
730 cmd = nvmet_tcp_fetch_cmd(queue); in nvmet_tcp_try_send_one()
772 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue, in nvmet_tcp_try_send() argument
778 ret = nvmet_tcp_try_send_one(queue, i == budget - 1); in nvmet_tcp_try_send()
780 nvmet_tcp_socket_error(queue, ret); in nvmet_tcp_try_send()
791 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) in nvmet_prepare_receive_pdu() argument
793 queue->offset = 0; in nvmet_prepare_receive_pdu()
794 queue->left = sizeof(struct nvme_tcp_hdr); in nvmet_prepare_receive_pdu()
795 queue->cmd = NULL; in nvmet_prepare_receive_pdu()
796 queue->rcv_state = NVMET_TCP_RECV_PDU; in nvmet_prepare_receive_pdu()
799 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) in nvmet_tcp_free_crypto() argument
801 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvmet_tcp_free_crypto()
803 ahash_request_free(queue->rcv_hash); in nvmet_tcp_free_crypto()
804 ahash_request_free(queue->snd_hash); in nvmet_tcp_free_crypto()
808 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue) in nvmet_tcp_alloc_crypto() argument
816 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvmet_tcp_alloc_crypto()
817 if (!queue->snd_hash) in nvmet_tcp_alloc_crypto()
819 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvmet_tcp_alloc_crypto()
821 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvmet_tcp_alloc_crypto()
822 if (!queue->rcv_hash) in nvmet_tcp_alloc_crypto()
824 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvmet_tcp_alloc_crypto()
828 ahash_request_free(queue->snd_hash); in nvmet_tcp_alloc_crypto()
835 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) in nvmet_tcp_handle_icreq() argument
837 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; in nvmet_tcp_handle_icreq()
838 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp; in nvmet_tcp_handle_icreq()
846 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_icreq()
850 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv); in nvmet_tcp_handle_icreq()
855 pr_err("queue %d: unsupported hpda %d\n", queue->idx, in nvmet_tcp_handle_icreq()
860 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvmet_tcp_handle_icreq()
861 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvmet_tcp_handle_icreq()
862 if (queue->hdr_digest || queue->data_digest) { in nvmet_tcp_handle_icreq()
863 ret = nvmet_tcp_alloc_crypto(queue); in nvmet_tcp_handle_icreq()
876 if (queue->hdr_digest) in nvmet_tcp_handle_icreq()
878 if (queue->data_digest) in nvmet_tcp_handle_icreq()
883 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvmet_tcp_handle_icreq()
887 queue->state = NVMET_TCP_Q_LIVE; in nvmet_tcp_handle_icreq()
888 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_handle_icreq()
891 if (queue->hdr_digest || queue->data_digest) in nvmet_tcp_handle_icreq()
892 nvmet_tcp_free_crypto(queue); in nvmet_tcp_handle_icreq()
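The nvmet_tcp_handle_icreq() matches cover connection setup: the ICReq PDU is validated (pfv, hpda), the initiator's digest requests are latched into queue->hdr_digest and queue->data_digest, hash contexts are allocated when either digest is enabled, an ICResp is sent, and the queue moves to NVMET_TCP_Q_LIVE. A small sketch of the digest-negotiation step; the bit values mirror NVME_TCP_HDR_DIGEST_ENABLE and NVME_TCP_DATA_DIGEST_ENABLE (bits 0 and 1 of icreq->digest), everything else is illustrative.

#include <stdbool.h>
#include <stdio.h>

#define HDR_DIGEST_ENABLE	(1 << 0)	/* NVME_TCP_HDR_DIGEST_ENABLE */
#define DATA_DIGEST_ENABLE	(1 << 1)	/* NVME_TCP_DATA_DIGEST_ENABLE */

struct tgt_queue {
	bool hdr_digest;
	bool data_digest;
};

static void negotiate_digests(struct tgt_queue *q, unsigned char icreq_digest)
{
	q->hdr_digest  = !!(icreq_digest & HDR_DIGEST_ENABLE);
	q->data_digest = !!(icreq_digest & DATA_DIGEST_ENABLE);
}

int main(void)
{
	struct tgt_queue q;

	negotiate_digests(&q, DATA_DIGEST_ENABLE);	/* DDGST only */
	printf("hdgst=%d ddgst=%d\n", q.hdr_digest, q.data_digest);	/* 0 1 */
	return 0;
}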
896 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, in nvmet_tcp_handle_req_failure() argument
904 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_handle_req_failure()
910 pr_err("queue %d: failed to map data\n", queue->idx); in nvmet_tcp_handle_req_failure()
911 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_req_failure()
915 queue->rcv_state = NVMET_TCP_RECV_DATA; in nvmet_tcp_handle_req_failure()
920 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) in nvmet_tcp_handle_h2c_data_pdu() argument
922 struct nvme_tcp_data_pdu *data = &queue->pdu.data; in nvmet_tcp_handle_h2c_data_pdu()
925 if (likely(queue->nr_cmds)) { in nvmet_tcp_handle_h2c_data_pdu()
926 if (unlikely(data->ttag >= queue->nr_cmds)) { in nvmet_tcp_handle_h2c_data_pdu()
928 queue->idx, data->ttag, queue->nr_cmds); in nvmet_tcp_handle_h2c_data_pdu()
929 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_h2c_data_pdu()
932 cmd = &queue->cmds[data->ttag]; in nvmet_tcp_handle_h2c_data_pdu()
934 cmd = &queue->connect; in nvmet_tcp_handle_h2c_data_pdu()
950 queue->cmd = cmd; in nvmet_tcp_handle_h2c_data_pdu()
951 queue->rcv_state = NVMET_TCP_RECV_DATA; in nvmet_tcp_handle_h2c_data_pdu()
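nvmet_tcp_handle_h2c_data_pdu() resolves an incoming H2CData PDU to a command by its ttag, bounds-checking the tag against queue->nr_cmds before indexing queue->cmds and treating an out-of-range value as a fatal protocol error; before the command array exists it falls back to the dedicated connect command. A sketch of that validated lookup, with illustrative names:

#include <stdio.h>

struct cmd { int id; };

struct tgt_queue {
	struct cmd *cmds;	/* nr_cmds entries once allocated */
	int nr_cmds;
	struct cmd connect;	/* pre-allocated cmd used before nr_cmds is set */
};

/* NULL means "bogus tag from the wire"; the caller tears the queue down */
static struct cmd *lookup_by_ttag(struct tgt_queue *q, unsigned int ttag)
{
	if (q->nr_cmds) {
		if (ttag >= (unsigned int)q->nr_cmds)
			return NULL;
		return &q->cmds[ttag];
	}
	return &q->connect;	/* connect/ICReq phase */
}

int main(void)
{
	struct cmd cmds[2] = { { .id = 0 }, { .id = 1 } };
	struct tgt_queue q = { .cmds = cmds, .nr_cmds = 2 };

	printf("ttag 1 -> cmd %d\n", lookup_by_ttag(&q, 1)->id);
	printf("ttag 7 -> %s\n", lookup_by_ttag(&q, 7) ? "ok" : "rejected");
	return 0;
}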
956 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) in nvmet_tcp_done_recv_pdu() argument
958 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_done_recv_pdu()
959 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; in nvmet_tcp_done_recv_pdu()
963 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { in nvmet_tcp_done_recv_pdu()
967 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
970 return nvmet_tcp_handle_icreq(queue); in nvmet_tcp_done_recv_pdu()
974 ret = nvmet_tcp_handle_h2c_data_pdu(queue); in nvmet_tcp_done_recv_pdu()
980 queue->cmd = nvmet_tcp_get_cmd(queue); in nvmet_tcp_done_recv_pdu()
981 if (unlikely(!queue->cmd)) { in nvmet_tcp_done_recv_pdu()
984 queue->idx, queue->nr_cmds, queue->send_list_len, in nvmet_tcp_done_recv_pdu()
986 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
990 req = &queue->cmd->req; in nvmet_tcp_done_recv_pdu()
993 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq, in nvmet_tcp_done_recv_pdu()
994 &queue->nvme_sq, &nvmet_tcp_ops))) { in nvmet_tcp_done_recv_pdu()
1000 nvmet_tcp_handle_req_failure(queue, queue->cmd, req); in nvmet_tcp_done_recv_pdu()
1004 ret = nvmet_tcp_map_data(queue->cmd); in nvmet_tcp_done_recv_pdu()
1006 pr_err("queue %d: failed to map data\n", queue->idx); in nvmet_tcp_done_recv_pdu()
1007 if (nvmet_tcp_has_inline_data(queue->cmd)) in nvmet_tcp_done_recv_pdu()
1008 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
1015 if (nvmet_tcp_need_data_in(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1016 if (nvmet_tcp_has_inline_data(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1017 queue->rcv_state = NVMET_TCP_RECV_DATA; in nvmet_tcp_done_recv_pdu()
1018 nvmet_tcp_map_pdu_iovec(queue->cmd); in nvmet_tcp_done_recv_pdu()
1022 nvmet_tcp_queue_response(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1026 queue->cmd->req.execute(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1028 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_done_recv_pdu()
1060 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_pdu() argument
1062 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_recv_pdu()
1068 iov.iov_base = (void *)&queue->pdu + queue->offset; in nvmet_tcp_try_recv_pdu()
1069 iov.iov_len = queue->left; in nvmet_tcp_try_recv_pdu()
1070 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvmet_tcp_try_recv_pdu()
1075 queue->offset += len; in nvmet_tcp_try_recv_pdu()
1076 queue->left -= len; in nvmet_tcp_try_recv_pdu()
1077 if (queue->left) in nvmet_tcp_try_recv_pdu()
1080 if (queue->offset == sizeof(struct nvme_tcp_hdr)) { in nvmet_tcp_try_recv_pdu()
1081 u8 hdgst = nvmet_tcp_hdgst_len(queue); in nvmet_tcp_try_recv_pdu()
1085 nvmet_tcp_fatal_error(queue); in nvmet_tcp_try_recv_pdu()
1094 queue->left = hdr->hlen - queue->offset + hdgst; in nvmet_tcp_try_recv_pdu()
1098 if (queue->hdr_digest && in nvmet_tcp_try_recv_pdu()
1099 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) { in nvmet_tcp_try_recv_pdu()
1100 nvmet_tcp_fatal_error(queue); /* fatal */ in nvmet_tcp_try_recv_pdu()
1104 if (queue->data_digest && in nvmet_tcp_try_recv_pdu()
1105 nvmet_tcp_check_ddgst(queue, &queue->pdu)) { in nvmet_tcp_try_recv_pdu()
1106 nvmet_tcp_fatal_error(queue); /* fatal */ in nvmet_tcp_try_recv_pdu()
1110 return nvmet_tcp_done_recv_pdu(queue); in nvmet_tcp_try_recv_pdu()
1115 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_prep_recv_ddgst() local
1117 nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd); in nvmet_tcp_prep_recv_ddgst()
1118 queue->offset = 0; in nvmet_tcp_prep_recv_ddgst()
1119 queue->left = NVME_TCP_DIGEST_LENGTH; in nvmet_tcp_prep_recv_ddgst()
1120 queue->rcv_state = NVMET_TCP_RECV_DDGST; in nvmet_tcp_prep_recv_ddgst()
1123 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_data() argument
1125 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_data()
1129 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, in nvmet_tcp_try_recv_data()
1139 if (queue->data_digest) { in nvmet_tcp_try_recv_data()
1147 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_try_recv_data()
1151 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_ddgst() argument
1153 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_ddgst()
1157 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, in nvmet_tcp_try_recv_ddgst()
1158 .iov_len = queue->left in nvmet_tcp_try_recv_ddgst()
1161 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvmet_tcp_try_recv_ddgst()
1166 queue->offset += ret; in nvmet_tcp_try_recv_ddgst()
1167 queue->left -= ret; in nvmet_tcp_try_recv_ddgst()
1168 if (queue->left) in nvmet_tcp_try_recv_ddgst()
1171 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { in nvmet_tcp_try_recv_ddgst()
1173 queue->idx, cmd->req.cmd->common.command_id, in nvmet_tcp_try_recv_ddgst()
1174 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), in nvmet_tcp_try_recv_ddgst()
1177 nvmet_tcp_fatal_error(queue); in nvmet_tcp_try_recv_ddgst()
1187 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_try_recv_ddgst()
1191 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_one() argument
1195 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) in nvmet_tcp_try_recv_one()
1198 if (queue->rcv_state == NVMET_TCP_RECV_PDU) { in nvmet_tcp_try_recv_one()
1199 result = nvmet_tcp_try_recv_pdu(queue); in nvmet_tcp_try_recv_one()
1204 if (queue->rcv_state == NVMET_TCP_RECV_DATA) { in nvmet_tcp_try_recv_one()
1205 result = nvmet_tcp_try_recv_data(queue); in nvmet_tcp_try_recv_one()
1210 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) { in nvmet_tcp_try_recv_one()
1211 result = nvmet_tcp_try_recv_ddgst(queue); in nvmet_tcp_try_recv_one()
1225 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue, in nvmet_tcp_try_recv() argument
1231 ret = nvmet_tcp_try_recv_one(queue); in nvmet_tcp_try_recv()
1233 nvmet_tcp_socket_error(queue, ret); in nvmet_tcp_try_recv()
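nvmet_tcp_try_recv_one() above is a three-phase receive state machine: consume the PDU header, then any inline or H2C data, then the trailing data digest, with each phase returning early when the socket has no more bytes; nvmet_tcp_try_recv() just repeats it up to a budget. A compilable sketch of the fall-through structure, with the phase handlers stubbed (the real ones return 0 when the socket runs dry and a negative value on fatal errors):

#include <stdio.h>

enum rcv_state { RECV_PDU, RECV_DATA, RECV_DDGST, RECV_ERR };

struct rx_queue { enum rcv_state rcv_state; };

/* stubs: each consumes its phase, advances the state, reports progress */
static int recv_pdu(struct rx_queue *q)   { q->rcv_state = RECV_DATA;  return 1; }
static int recv_data(struct rx_queue *q)  { q->rcv_state = RECV_DDGST; return 1; }
static int recv_ddgst(struct rx_queue *q) { q->rcv_state = RECV_PDU;   return 1; }

static int try_recv_one(struct rx_queue *q)
{
	int result = 0;

	if (q->rcv_state == RECV_ERR)
		return -1;

	if (q->rcv_state == RECV_PDU) {
		result = recv_pdu(q);
		if (result <= 0)
			return result;	/* no bytes yet, or fatal */
	}
	if (q->rcv_state == RECV_DATA) {
		result = recv_data(q);
		if (result <= 0)
			return result;
	}
	if (q->rcv_state == RECV_DDGST)
		result = recv_ddgst(q);
	return result;
}

int main(void)
{
	struct rx_queue q = { .rcv_state = RECV_PDU };

	printf("pass: %d, state back to %d\n", try_recv_one(&q), q.rcv_state);
	return 0;
}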
1244 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) in nvmet_tcp_schedule_release_queue() argument
1246 spin_lock(&queue->state_lock); in nvmet_tcp_schedule_release_queue()
1247 if (queue->state != NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_schedule_release_queue()
1248 queue->state = NVMET_TCP_Q_DISCONNECTING; in nvmet_tcp_schedule_release_queue()
1249 schedule_work(&queue->release_work); in nvmet_tcp_schedule_release_queue()
1251 spin_unlock(&queue->state_lock); in nvmet_tcp_schedule_release_queue()
1256 struct nvmet_tcp_queue *queue = in nvmet_tcp_io_work() local
1264 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops); in nvmet_tcp_io_work()
1270 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops); in nvmet_tcp_io_work()
1282 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_io_work()
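nvmet_tcp_io_work() alternates bounded receive and send passes (NVMET_TCP_RECV_BUDGET / NVMET_TCP_SEND_BUDGET) and, rather than spinning, requeues itself on the socket's incoming CPU via queue_work_on() when there may be more to do, so one busy connection cannot monopolize a workqueue worker. A sketch of that budgeted polling shape; the constants, stubs, and the exact requeue condition are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

#define RECV_BUDGET	8	/* stand-in for NVMET_TCP_RECV_BUDGET */
#define SEND_BUDGET	8	/* stand-in for NVMET_TCP_SEND_BUDGET */
#define IO_WORK_BUDGET	64	/* total ops before yielding the worker */

/* stubs returning how many operations made progress this pass */
static int try_recv(int budget) { return budget / 2; }
static int try_send(int budget) { return budget / 2; }

static bool io_work_pass(void)
{
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = try_recv(RECV_BUDGET);
		if (ret > 0) {
			pending = true;
			ops += ret;
		}

		ret = try_send(SEND_BUDGET);
		if (ret > 0) {
			pending = true;
			ops += ret;
		}
	} while (pending && ops < IO_WORK_BUDGET);

	return pending;	/* caller requeues the work item if true */
}

int main(void)
{
	printf("requeue: %s\n", io_work_pass() ? "yes" : "no");
	return 0;
}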
1285 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, in nvmet_tcp_alloc_cmd() argument
1288 u8 hdgst = nvmet_tcp_hdgst_len(queue); in nvmet_tcp_alloc_cmd()
1290 c->queue = queue; in nvmet_tcp_alloc_cmd()
1291 c->req.port = queue->port->nport; in nvmet_tcp_alloc_cmd()
1293 c->cmd_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1299 c->rsp_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1305 c->data_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1310 c->r2t_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1317 list_add_tail(&c->entry, &queue->free_list); in nvmet_tcp_alloc_cmd()
1337 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) in nvmet_tcp_alloc_cmds() argument
1340 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; in nvmet_tcp_alloc_cmds()
1347 ret = nvmet_tcp_alloc_cmd(queue, cmds + i); in nvmet_tcp_alloc_cmds()
1352 queue->cmds = cmds; in nvmet_tcp_alloc_cmds()
1363 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) in nvmet_tcp_free_cmds() argument
1365 struct nvmet_tcp_cmd *cmds = queue->cmds; in nvmet_tcp_free_cmds()
1368 for (i = 0; i < queue->nr_cmds; i++) in nvmet_tcp_free_cmds()
1371 nvmet_tcp_free_cmd(&queue->connect); in nvmet_tcp_free_cmds()
1375 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) in nvmet_tcp_restore_socket_callbacks() argument
1377 struct socket *sock = queue->sock; in nvmet_tcp_restore_socket_callbacks()
1380 sock->sk->sk_data_ready = queue->data_ready; in nvmet_tcp_restore_socket_callbacks()
1381 sock->sk->sk_state_change = queue->state_change; in nvmet_tcp_restore_socket_callbacks()
1382 sock->sk->sk_write_space = queue->write_space; in nvmet_tcp_restore_socket_callbacks()
1395 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) in nvmet_tcp_uninit_data_in_cmds() argument
1397 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_uninit_data_in_cmds()
1400 for (i = 0; i < queue->nr_cmds; i++, cmd++) { in nvmet_tcp_uninit_data_in_cmds()
1405 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { in nvmet_tcp_uninit_data_in_cmds()
1407 nvmet_tcp_finish_cmd(&queue->connect); in nvmet_tcp_uninit_data_in_cmds()
1414 struct nvmet_tcp_queue *queue = in nvmet_tcp_release_queue_work() local
1418 list_del_init(&queue->queue_list); in nvmet_tcp_release_queue_work()
1421 nvmet_tcp_restore_socket_callbacks(queue); in nvmet_tcp_release_queue_work()
1422 flush_work(&queue->io_work); in nvmet_tcp_release_queue_work()
1424 nvmet_tcp_uninit_data_in_cmds(queue); in nvmet_tcp_release_queue_work()
1425 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_tcp_release_queue_work()
1426 cancel_work_sync(&queue->io_work); in nvmet_tcp_release_queue_work()
1427 sock_release(queue->sock); in nvmet_tcp_release_queue_work()
1428 nvmet_tcp_free_cmds(queue); in nvmet_tcp_release_queue_work()
1429 if (queue->hdr_digest || queue->data_digest) in nvmet_tcp_release_queue_work()
1430 nvmet_tcp_free_crypto(queue); in nvmet_tcp_release_queue_work()
1431 ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx); in nvmet_tcp_release_queue_work()
1433 page = virt_to_head_page(queue->pf_cache.va); in nvmet_tcp_release_queue_work()
1434 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); in nvmet_tcp_release_queue_work()
1435 kfree(queue); in nvmet_tcp_release_queue_work()
1440 struct nvmet_tcp_queue *queue; in nvmet_tcp_data_ready() local
1443 queue = sk->sk_user_data; in nvmet_tcp_data_ready()
1444 if (likely(queue)) in nvmet_tcp_data_ready()
1445 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_data_ready()
1451 struct nvmet_tcp_queue *queue; in nvmet_tcp_write_space() local
1454 queue = sk->sk_user_data; in nvmet_tcp_write_space()
1455 if (unlikely(!queue)) in nvmet_tcp_write_space()
1458 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { in nvmet_tcp_write_space()
1459 queue->write_space(sk); in nvmet_tcp_write_space()
1465 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_write_space()
1473 struct nvmet_tcp_queue *queue; in nvmet_tcp_state_change() local
1476 queue = sk->sk_user_data; in nvmet_tcp_state_change()
1477 if (!queue) in nvmet_tcp_state_change()
1488 nvmet_tcp_schedule_release_queue(queue); in nvmet_tcp_state_change()
1492 queue->idx, sk->sk_state); in nvmet_tcp_state_change()
1498 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) in nvmet_tcp_set_queue_sock() argument
1500 struct socket *sock = queue->sock; in nvmet_tcp_set_queue_sock()
1505 (struct sockaddr *)&queue->sockaddr); in nvmet_tcp_set_queue_sock()
1510 (struct sockaddr *)&queue->sockaddr_peer); in nvmet_tcp_set_queue_sock()
1537 sock->sk->sk_user_data = queue; in nvmet_tcp_set_queue_sock()
1538 queue->data_ready = sock->sk->sk_data_ready; in nvmet_tcp_set_queue_sock()
1540 queue->state_change = sock->sk->sk_state_change; in nvmet_tcp_set_queue_sock()
1542 queue->write_space = sock->sk->sk_write_space; in nvmet_tcp_set_queue_sock()
1544 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_set_queue_sock()
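nvmet_tcp_set_queue_sock(), together with the earlier nvmet_tcp_restore_socket_callbacks() matches, shows the classic socket-callback override pattern: stash the original sk_data_ready/sk_state_change/sk_write_space pointers in the queue, point sk_user_data at the queue, install hooks that kick io_work, and restore the originals at teardown. A userspace sketch with plain function pointers standing in for struct sock:

#include <stdio.h>

struct sock {
	void *sk_user_data;
	void (*sk_data_ready)(struct sock *sk);
};

struct tgt_queue {
	struct sock *sock;
	void (*saved_data_ready)(struct sock *sk);	/* like queue->data_ready */
};

static void default_data_ready(struct sock *sk)
{
	printf("default handler\n");
}

static void hook_data_ready(struct sock *sk)
{
	struct tgt_queue *q = sk->sk_user_data;

	if (q)
		printf("kick io_work for queue %p\n", (void *)q);
}

static void install(struct tgt_queue *q, struct sock *sk)
{
	q->sock = sk;
	sk->sk_user_data = q;
	q->saved_data_ready = sk->sk_data_ready;	/* save the original */
	sk->sk_data_ready = hook_data_ready;		/* override */
}

static void restore(struct tgt_queue *q)
{
	q->sock->sk_data_ready = q->saved_data_ready;
	q->sock->sk_user_data = NULL;
}

int main(void)
{
	struct sock sk = { .sk_data_ready = default_data_ready };
	struct tgt_queue q;

	install(&q, &sk);
	sk.sk_data_ready(&sk);	/* routed to the override */
	restore(&q);
	sk.sk_data_ready(&sk);	/* back to the default */
	return 0;
}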
1554 struct nvmet_tcp_queue *queue; in nvmet_tcp_alloc_queue() local
1557 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in nvmet_tcp_alloc_queue()
1558 if (!queue) in nvmet_tcp_alloc_queue()
1561 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work); in nvmet_tcp_alloc_queue()
1562 INIT_WORK(&queue->io_work, nvmet_tcp_io_work); in nvmet_tcp_alloc_queue()
1563 queue->sock = newsock; in nvmet_tcp_alloc_queue()
1564 queue->port = port; in nvmet_tcp_alloc_queue()
1565 queue->nr_cmds = 0; in nvmet_tcp_alloc_queue()
1566 spin_lock_init(&queue->state_lock); in nvmet_tcp_alloc_queue()
1567 queue->state = NVMET_TCP_Q_CONNECTING; in nvmet_tcp_alloc_queue()
1568 INIT_LIST_HEAD(&queue->free_list); in nvmet_tcp_alloc_queue()
1569 init_llist_head(&queue->resp_list); in nvmet_tcp_alloc_queue()
1570 INIT_LIST_HEAD(&queue->resp_send_list); in nvmet_tcp_alloc_queue()
1572 queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL); in nvmet_tcp_alloc_queue()
1573 if (queue->idx < 0) { in nvmet_tcp_alloc_queue()
1574 ret = queue->idx; in nvmet_tcp_alloc_queue()
1578 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect); in nvmet_tcp_alloc_queue()
1582 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_tcp_alloc_queue()
1586 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_alloc_queue()
1589 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); in nvmet_tcp_alloc_queue()
1592 ret = nvmet_tcp_set_queue_sock(queue); in nvmet_tcp_alloc_queue()
1599 list_del_init(&queue->queue_list); in nvmet_tcp_alloc_queue()
1601 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_tcp_alloc_queue()
1603 nvmet_tcp_free_cmd(&queue->connect); in nvmet_tcp_alloc_queue()
1605 ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx); in nvmet_tcp_alloc_queue()
1607 kfree(queue); in nvmet_tcp_alloc_queue()
1728 struct nvmet_tcp_queue *queue; in nvmet_tcp_destroy_port_queues() local
1731 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) in nvmet_tcp_destroy_port_queues()
1732 if (queue->port == port) in nvmet_tcp_destroy_port_queues()
1733 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_destroy_port_queues()
1758 struct nvmet_tcp_queue *queue; in nvmet_tcp_delete_ctrl() local
1761 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) in nvmet_tcp_delete_ctrl()
1762 if (queue->nvme_sq.ctrl == ctrl) in nvmet_tcp_delete_ctrl()
1763 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_delete_ctrl()
1769 struct nvmet_tcp_queue *queue = in nvmet_tcp_install_queue() local
1777 queue->nr_cmds = sq->size * 2; in nvmet_tcp_install_queue()
1778 if (nvmet_tcp_alloc_cmds(queue)) in nvmet_tcp_install_queue()
1791 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_disc_port_addr() local
1793 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr); in nvmet_tcp_disc_port_addr()
1832 struct nvmet_tcp_queue *queue; in nvmet_tcp_exit() local
1838 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) in nvmet_tcp_exit()
1839 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_exit()