Lines matching full:queue — whole-word occurrences of "queue" in the NVMe/TCP host driver (drivers/nvme/host/tcp.c); each entry shows the source line number, the matched code, and the enclosing function.

82 	struct nvme_tcp_queue	*queue;  member
180 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
187 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) in nvme_tcp_queue_id() argument
189 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
192 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) in nvme_tcp_tagset() argument
194 u32 queue_idx = nvme_tcp_queue_id(queue); in nvme_tcp_tagset()
197 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
198 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
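The fragments at lines 187-198 show two small helpers worth unpacking: the queue id is simply the element's offset within the controller's queues array (pointer subtraction), and queue 0 is the admin queue, so I/O queue N resolves to tag_set.tags[N - 1]. A minimal userspace sketch of the same arithmetic, with illustrative types rather than the driver's:

#include <stdio.h>

struct queue { int unused; };

struct ctrl {
        struct queue queues[4];          /* queues[0] is the admin queue */
};

static int queue_id(struct ctrl *ctrl, struct queue *q)
{
        return (int)(q - ctrl->queues);  /* offset within the array */
}

int main(void)
{
        struct ctrl c;
        int qid = queue_id(&c, &c.queues[2]);

        /* admin queue uses admin_tag_set; I/O queue N uses tags[N - 1] */
        printf("qid=%d -> %s\n", qid,
               qid ? "tag_set.tags[qid - 1]" : "admin_tag_set.tags[0]");
        return 0;
}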
201 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue) in nvme_tcp_hdgst_len() argument
203 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
206 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue) in nvme_tcp_ddgst_len() argument
208 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
227 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
232 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
321 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) in nvme_tcp_send_all() argument
325 /* drain the send queue as much as we can... */ in nvme_tcp_send_all()
327 ret = nvme_tcp_try_send(queue); in nvme_tcp_send_all()
331 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) in nvme_tcp_queue_more() argument
333 return !list_empty(&queue->send_list) || in nvme_tcp_queue_more()
334 !llist_empty(&queue->req_list); in nvme_tcp_queue_more()
340 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request() local
343 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
344 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
348 * directly, otherwise queue io_work. Also, only do that if we in nvme_tcp_queue_request()
351 if (queue->io_cpu == raw_smp_processor_id() && in nvme_tcp_queue_request()
352 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
353 nvme_tcp_send_all(queue); in nvme_tcp_queue_request()
354 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
357 if (last && nvme_tcp_queue_more(queue)) in nvme_tcp_queue_request()
358 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
361 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) in nvme_tcp_process_req_list() argument
366 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
368 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
373 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) in nvme_tcp_fetch_request() argument
377 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
380 nvme_tcp_process_req_list(queue); in nvme_tcp_fetch_request()
381 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
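Lines 321-381 sketch the submission path: producers push requests onto a lock-free llist (req_list); if the caller is already on the queue's io_cpu and the queue was idle, it sends inline under a send_mutex trylock, otherwise it kicks io_work. The consumer drains the llist in one atomic swap and head-inserts each node into the ordered send_list; since llist_del_all returns entries newest-first, the head inserts restore FIFO order. A userspace model of that drain-and-reverse step, assuming C11 atomics stand in for the kernel's llist:

#include <stdatomic.h>
#include <stdio.h>

struct node { int id; struct node *next; };

static _Atomic(struct node *) req_list;          /* lock-free stack head */

static void push(struct node *n)                 /* any context, no lock */
{
        n->next = atomic_load(&req_list);
        /* on CAS failure, n->next is refreshed to the current head */
        while (!atomic_compare_exchange_weak(&req_list, &n->next, n))
                ;
}

int main(void)
{
        struct node a = { 1 }, b = { 2 }, c = { 3 };

        push(&a); push(&b); push(&c);            /* arrival order 1, 2, 3 */

        /* consumer: one atomic swap grabs the whole batch (newest first) */
        struct node *batch = atomic_exchange(&req_list, NULL);

        struct node *send_list = NULL;           /* ordered send list */
        while (batch) {
                struct node *next = batch->next;
                batch->next = send_list;         /* head insert re-reverses */
                send_list = batch;
                batch = next;
        }

        for (struct node *n = send_list; n; n = n->next)
                printf("%d ", n->id);            /* prints: 1 2 3 */
        printf("\n");
        return 0;
}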
419 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, in nvme_tcp_verify_hdgst() argument
427 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
428 "queue %d: header digest flag is cleared\n", in nvme_tcp_verify_hdgst()
429 nvme_tcp_queue_id(queue)); in nvme_tcp_verify_hdgst()
434 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
437 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
446 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu) in nvme_tcp_check_ddgst() argument
449 u8 digest_len = nvme_tcp_hdgst_len(queue); in nvme_tcp_check_ddgst()
456 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
457 "queue %d: data digest flag is cleared\n", in nvme_tcp_check_ddgst()
458 nvme_tcp_queue_id(queue)); in nvme_tcp_check_ddgst()
461 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
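NVMe/TCP header and data digests are CRC32C; the driver computes them through the crypto API (the rcv_hash seen here), and lines 419-461 also verify that the peer actually set the digest flags it negotiated before checking any value. A plain software CRC32C for illustration only; the kernel uses the crc32c transform, which may be hardware-accelerated:

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32c(const void *buf, size_t len)
{
        const uint8_t *p = buf;
        uint32_t crc = ~0u;

        while (len--) {
                crc ^= *p++;
                for (int k = 0; k < 8; k++)  /* reflected poly 0x82F63B78 */
                        crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78u : crc >> 1;
        }
        return ~crc;
}

int main(void)
{
        uint8_t pdu[24] = "example pdu header";
        uint32_t recv_hdgst = crc32c(pdu, sizeof(pdu));  /* wire trailer */

        if (crc32c(pdu, sizeof(pdu)) != recv_hdgst)
                fprintf(stderr, "header digest mismatch\n");
        else
                printf("header digest ok: %#x\n", recv_hdgst);
        return 0;
}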
482 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request() local
483 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_init_request()
485 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
492 req->queue = queue; in nvme_tcp_init_request()
503 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx() local
505 hctx->driver_data = queue; in nvme_tcp_init_hctx()
513 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx() local
515 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
520 nvme_tcp_recv_state(struct nvme_tcp_queue *queue) in nvme_tcp_recv_state() argument
522 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
523 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
527 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue) in nvme_tcp_init_recv_ctx() argument
529 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
530 nvme_tcp_hdgst_len(queue); in nvme_tcp_init_recv_ctx()
531 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
532 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
533 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
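Lines 520-533 show that the receiver keeps no explicit state variable: the state is derived from whichever "remaining" counter is still nonzero, and init_recv_ctx re-arms pdu_remaining for the next response header (plus the header digest, if negotiated). A compact model of that derivation:

#include <stdio.h>

enum recv_state { RECV_PDU, RECV_DATA, RECV_DDGST };

struct rx {
        unsigned long pdu_remaining;
        long data_remaining;
        unsigned long ddgst_remaining;
};

static enum recv_state recv_state(const struct rx *rx)
{
        return rx->pdu_remaining   ? RECV_PDU :
               rx->ddgst_remaining ? RECV_DDGST : RECV_DATA;
}

int main(void)
{
        struct rx rx = { .pdu_remaining = 24 };  /* fresh response header */

        printf("%d\n", recv_state(&rx));         /* 0: RECV_PDU */
        rx.pdu_remaining = 0;
        rx.data_remaining = 4096;                /* C2HData payload */
        printf("%d\n", recv_state(&rx));         /* 1: RECV_DATA */
        rx.data_remaining = 0;
        rx.ddgst_remaining = 4;                  /* trailing CRC32C */
        printf("%d\n", recv_state(&rx));         /* 2: RECV_DDGST */
        return 0;
}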
545 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, in nvme_tcp_process_nvme_cqe() argument
551 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
553 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
554 "got bad cqe.command_id %#x on queue %d\n", in nvme_tcp_process_nvme_cqe()
555 cqe->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_process_nvme_cqe()
556 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
566 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
571 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue, in nvme_tcp_handle_c2h_data() argument
576 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
578 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
579 "got bad c2hdata.command_id %#x on queue %d\n", in nvme_tcp_handle_c2h_data()
580 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_c2h_data()
585 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
586 "queue %d tag %#x unexpected data\n", in nvme_tcp_handle_c2h_data()
587 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
591 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
595 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
596 "queue %d tag %#x SUCCESS set but not last PDU\n", in nvme_tcp_handle_c2h_data()
597 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
598 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
605 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, in nvme_tcp_handle_comp() argument
613 * survive any kind of queue freeze and often don't respond to in nvme_tcp_handle_comp()
617 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue), in nvme_tcp_handle_comp()
619 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
622 ret = nvme_tcp_process_nvme_cqe(queue, cqe); in nvme_tcp_handle_comp()
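Lines 545-622 route a completion: command_id normally resolves to a tag in the blk-mq tagset, but async-event requests are not tagset-backed, so AEN command ids on the admin queue are intercepted before the lookup, and an unresolvable id triggers error recovery. A sketch of that dispatch, with hypothetical constants:

#include <stdio.h>

#define NR_TAGS    128
#define AEN_CMD_ID NR_TAGS       /* hypothetical: first id past the tags */

static void complete_io(int tag) { printf("complete tag %d\n", tag); }
static void complete_aen(void)   { printf("async event notice\n"); }

static int handle_cqe(int qid, int command_id)
{
        /* AENs are only ever outstanding on the admin queue (qid 0) */
        if (qid == 0 && command_id == AEN_CMD_ID) {
                complete_aen();
                return 0;
        }
        if (command_id >= NR_TAGS)
                return -1;       /* bad cqe -> error recovery */
        complete_io(command_id);
        return 0;
}

int main(void)
{
        handle_cqe(0, AEN_CMD_ID);
        handle_cqe(1, 7);
        return 0;
}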
630 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu() local
633 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_setup_h2c_data_pdu()
634 u8 ddgst = nvme_tcp_ddgst_len(queue); in nvme_tcp_setup_h2c_data_pdu()
638 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); in nvme_tcp_setup_h2c_data_pdu()
647 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
649 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
661 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, in nvme_tcp_handle_r2t() argument
669 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
671 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
672 "got bad r2t.command_id %#x on queue %d\n", in nvme_tcp_handle_r2t()
673 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_r2t()
679 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
686 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
693 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
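Lines 630-693 implement the write-data handshake: an R2T PDU from the controller grants a transfer window, and the host answers with H2CData PDUs, each capped at the maxh2cdata value negotiated at connect time (hence req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata)). A worked example of the chunking, with illustrative numbers:

#include <stdio.h>

int main(void)
{
        unsigned int r2t_length = 200000;  /* window the controller granted */
        unsigned int maxh2cdata = 65536;   /* negotiated in ICResp */
        unsigned int offset = 0;

        while (r2t_length) {
                unsigned int pdu_len = r2t_length < maxh2cdata ?
                                       r2t_length : maxh2cdata;

                printf("H2CData: offset=%u len=%u last=%d\n",
                       offset, pdu_len, r2t_length == pdu_len);
                offset += pdu_len;
                r2t_length -= pdu_len;
        }
        return 0;
}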
710 static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue, in nvme_tcp_handle_c2h_term() argument
728 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_term()
740 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_term()
744 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, in nvme_tcp_recv_pdu() argument
748 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
749 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
753 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
757 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
758 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
761 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
764 hdr = queue->pdu; in nvme_tcp_recv_pdu()
770 nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
774 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
775 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
781 if (queue->data_digest) { in nvme_tcp_recv_pdu()
782 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
789 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
791 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_pdu()
792 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
794 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_pdu()
795 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
797 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
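Once recv_pdu has accumulated the complete header (lines 744-797), it dispatches on the PDU type byte; C2HTermReq is handled first because it signals a fatal error from the controller. A sketch of the switch, using the type values from the NVMe/TCP specification:

#include <stdio.h>

enum {                                  /* PDU types per the NVMe/TCP spec */
        PDU_C2H_TERM     = 0x3,
        PDU_CAPSULE_RESP = 0x5,
        PDU_C2H_DATA     = 0x7,
        PDU_R2T          = 0x9,
};

static int dispatch(unsigned char type)
{
        switch (type) {
        case PDU_C2H_DATA:     printf("data-in\n");           return 0;
        case PDU_CAPSULE_RESP: printf("completion\n");        return 0;
        case PDU_R2T:          printf("ready-to-transfer\n"); return 0;
        case PDU_C2H_TERM:     printf("fatal: terminate\n");  return -1;
        default:               printf("unknown %#x\n", type); return -1;
        }
}

int main(void)
{
        return dispatch(PDU_C2H_DATA);
}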
811 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, in nvme_tcp_recv_data() argument
814 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
816 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
822 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
834 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
835 "queue %d no space in request %#x", in nvme_tcp_recv_data()
836 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
837 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_data()
847 if (queue->data_digest) in nvme_tcp_recv_data()
849 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
854 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
855 "queue %d failed to copy request %#x data", in nvme_tcp_recv_data()
856 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
862 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
865 if (!queue->data_remaining) { in nvme_tcp_recv_data()
866 if (queue->data_digest) { in nvme_tcp_recv_data()
867 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
868 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
873 queue->nr_cqe++; in nvme_tcp_recv_data()
875 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_data()
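recv_data (lines 811-875) must cope with the payload arriving across many socket buffers: each pass consumes min(*len, data_remaining) bytes into the request's iov iterator, feeds the data digest if enabled, and only completes or re-arms the context when data_remaining hits zero. For example:

#include <stdio.h>

int main(void)
{
        unsigned long data_remaining = 8192;   /* from the C2HData header */
        unsigned long skb_chunks[] = { 1460, 1460, 1460, 1460, 1460, 892 };

        for (unsigned i = 0; i < sizeof(skb_chunks) / sizeof(*skb_chunks); i++) {
                unsigned long avail = skb_chunks[i];
                unsigned long recv_len =
                        avail < data_remaining ? avail : data_remaining;

                data_remaining -= recv_len;
                printf("consumed %lu, %lu left\n", recv_len, data_remaining);
        }
        return 0;
}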
882 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, in nvme_tcp_recv_ddgst() argument
885 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
886 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
887 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
888 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
895 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
898 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
901 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
902 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), in nvme_tcp_recv_ddgst()
908 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
910 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
911 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
915 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), in nvme_tcp_recv_ddgst()
920 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
923 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_ddgst()
930 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb() local
934 if (unlikely(!queue->rd_enabled)) in nvme_tcp_recv_skb()
938 switch (nvme_tcp_recv_state(queue)) { in nvme_tcp_recv_skb()
940 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
943 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
946 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
952 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
954 queue->rd_enabled = false; in nvme_tcp_recv_skb()
955 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
965 struct nvme_tcp_queue *queue; in nvme_tcp_data_ready() local
970 queue = sk->sk_user_data; in nvme_tcp_data_ready()
971 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
972 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
973 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
979 struct nvme_tcp_queue *queue; in nvme_tcp_write_space() local
982 queue = sk->sk_user_data; in nvme_tcp_write_space()
983 if (likely(queue && sk_stream_is_writeable(sk))) { in nvme_tcp_write_space()
985 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
992 struct nvme_tcp_queue *queue; in nvme_tcp_state_change() local
995 queue = sk->sk_user_data; in nvme_tcp_state_change()
996 if (!queue) in nvme_tcp_state_change()
1005 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
1008 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
1009 "queue %d socket state %d\n", in nvme_tcp_state_change()
1010 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
1013 queue->state_change(sk); in nvme_tcp_state_change()
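Lines 930-1013 are the socket upcalls: sk_user_data points back at the queue, data_ready and write_space schedule io_work on the queue's io_cpu, and state_change chains to the saved original callback after triggering error recovery for disconnect states. A userspace model of that interposition pattern (simplified; only the state_change path chains in the driver):

#include <stdio.h>

struct sock {
        void *sk_user_data;
        void (*sk_state_change)(struct sock *);
};

struct queue {
        void (*saved_state_change)(struct sock *);
};

static void orig_state_change(struct sock *sk)
{
        printf("original callback runs last\n");
}

static void my_state_change(struct sock *sk)
{
        struct queue *q = sk->sk_user_data;

        printf("inspect state, maybe start error recovery\n");
        q->saved_state_change(sk);      /* chain to the saved callback */
}

int main(void)
{
        struct sock sk = { .sk_state_change = orig_state_change };
        struct queue q = { .saved_state_change = sk.sk_state_change };

        sk.sk_user_data = &q;           /* as in setup_sock_ops */
        sk.sk_state_change = my_state_change;

        sk.sk_state_change(&sk);        /* the stack signals a transition */
        return 0;
}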
1018 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) in nvme_tcp_done_send_req() argument
1020 queue->request = NULL; in nvme_tcp_done_send_req()
1028 nvme_complete_async_event(&req->queue->ctrl->ctrl, in nvme_tcp_fail_request()
1038 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data() local
1054 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
1064 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data()
1068 if (queue->data_digest) in nvme_tcp_try_send_data()
1069 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
1082 if (queue->data_digest) { in nvme_tcp_try_send_data()
1083 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
1091 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_data()
1101 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu() local
1106 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_try_send_cmd_pdu()
1110 if (inline_data || nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_cmd_pdu()
1115 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
1116 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
1120 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_cmd_pdu()
1128 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
1129 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
1131 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_cmd_pdu()
1142 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu() local
1146 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_try_send_data_pdu()
1150 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
1151 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
1158 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data_pdu()
1165 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1166 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1176 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst() local
1186 if (nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_ddgst()
1191 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1199 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_ddgst()
1207 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) in nvme_tcp_try_send() argument
1213 if (!queue->request) { in nvme_tcp_try_send()
1214 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1215 if (!queue->request) in nvme_tcp_try_send()
1218 req = queue->request; in nvme_tcp_try_send()
1247 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1249 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1250 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send()
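try_send (lines 1207-1250) drives a per-request send state machine: command PDU, then (for R2T-solicited writes) an H2CData PDU, then payload, then the data digest, with each stage able to stop on a partial sock_sendmsg and resume at req->offset later. A deliberately simplified model of the stage ordering, assuming inline write data and the data digest enabled:

#include <stdio.h>

enum send_state {
        SEND_CMD_PDU, SEND_H2C_PDU, SEND_DATA, SEND_DDGST, SEND_DONE,
};

static enum send_state next(enum send_state s, int has_data, int ddgst)
{
        switch (s) {
        case SEND_CMD_PDU: return has_data ? SEND_DATA : SEND_DONE;
        case SEND_H2C_PDU: return SEND_DATA;       /* R2T-solicited write */
        case SEND_DATA:    return ddgst ? SEND_DDGST : SEND_DONE;
        case SEND_DDGST:   return SEND_DONE;
        default:           return SEND_DONE;
        }
}

int main(void)
{
        enum send_state s = SEND_CMD_PDU;

        while (s != SEND_DONE) {
                printf("stage %d\n", s);  /* a stage may span many calls */
                s = next(s, 1, 1);
        }
        return 0;
}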
1257 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) in nvme_tcp_try_recv() argument
1259 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1264 rd_desc.arg.data = queue; in nvme_tcp_try_recv()
1267 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1275 struct nvme_tcp_queue *queue = in nvme_tcp_io_work() local
1283 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1284 result = nvme_tcp_try_send(queue); in nvme_tcp_io_work()
1285 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1292 result = nvme_tcp_try_recv(queue); in nvme_tcp_io_work()
1298 if (!pending || !queue->rd_enabled) in nvme_tcp_io_work()
1303 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1306 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue) in nvme_tcp_free_crypto() argument
1308 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1310 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1311 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1315 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue) in nvme_tcp_alloc_crypto() argument
1323 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1324 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1326 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1328 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1329 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1331 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1335 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1350 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req() local
1352 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_alloc_async_req()
1354 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1360 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1368 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue() local
1371 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1374 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1375 nvme_tcp_free_crypto(queue); in nvme_tcp_free_queue()
1377 if (queue->pf_cache.va) { in nvme_tcp_free_queue()
1378 page = virt_to_head_page(queue->pf_cache.va); in nvme_tcp_free_queue()
1379 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); in nvme_tcp_free_queue()
1380 queue->pf_cache.va = NULL; in nvme_tcp_free_queue()
1384 sock_release(queue->sock); in nvme_tcp_free_queue()
1387 kfree(queue->pdu); in nvme_tcp_free_queue()
1388 mutex_destroy(&queue->send_mutex); in nvme_tcp_free_queue()
1389 mutex_destroy(&queue->queue_lock); in nvme_tcp_free_queue()
1392 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) in nvme_tcp_init_connection() argument
1419 if (queue->hdr_digest) in nvme_tcp_init_connection()
1421 if (queue->data_digest) in nvme_tcp_init_connection()
1426 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1433 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1440 pr_err("queue %d: bad type returned %d\n", in nvme_tcp_init_connection()
1441 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1446 pr_err("queue %d: bad pdu length returned %d\n", in nvme_tcp_init_connection()
1447 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1452 pr_err("queue %d: bad pfv returned %d\n", in nvme_tcp_init_connection()
1453 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1458 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1459 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1460 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1461 nvme_tcp_queue_id(queue), in nvme_tcp_init_connection()
1462 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1468 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1469 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1470 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1471 nvme_tcp_queue_id(queue), in nvme_tcp_init_connection()
1472 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1478 pr_err("queue %d: unsupported cpda returned %d\n", in nvme_tcp_init_connection()
1479 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
1485 pr_err("queue %d: invalid maxh2cdata returned %u\n", in nvme_tcp_init_connection()
1486 nvme_tcp_queue_id(queue), maxh2cdata); in nvme_tcp_init_connection()
1489 queue->maxh2cdata = maxh2cdata; in nvme_tcp_init_connection()
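Lines 1392-1489 run the connect-time ICReq/ICResp exchange and refuse the connection unless the response is self-consistent: correct PDU type and length, PFV 0, digest flags echoing what the host requested, CPDA 0, and a sane maxh2cdata. A sketch of those checks (field layout is illustrative; the driver additionally enforces a spec minimum on maxh2cdata):

#include <stdbool.h>
#include <stdio.h>

struct icresp {                  /* illustrative layout, not wire format */
        unsigned char type;      /* must be 0x01 (ICResp) */
        unsigned short pfv;      /* PDU format version, must be 0 */
        bool hdgst, ddgst;       /* digests the controller enabled */
        unsigned char cpda;      /* controller PDU data alignment */
        unsigned int maxh2cdata; /* per-PDU host-to-controller limit */
};

static int validate(const struct icresp *r, bool want_hdgst, bool want_ddgst)
{
        if (r->type != 0x01)            return -1;  /* bad type */
        if (r->pfv != 0)                return -1;  /* bad pfv */
        if (r->ddgst != want_ddgst)     return -1;  /* data digest mismatch */
        if (r->hdgst != want_hdgst)     return -1;  /* header digest mismatch */
        if (r->cpda != 0)               return -1;  /* unsupported cpda */
        if (!r->maxh2cdata || r->maxh2cdata % 4)
                return -1;              /* invalid maxh2cdata */
        return 0;
}

int main(void)
{
        struct icresp r = { .type = 0x01, .maxh2cdata = 65536 };

        printf("validate: %d\n", validate(&r, false, false));
        return 0;
}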
1499 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) in nvme_tcp_admin_queue() argument
1501 return nvme_tcp_queue_id(queue) == 0; in nvme_tcp_admin_queue()
1504 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) in nvme_tcp_default_queue() argument
1506 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue()
1507 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_default_queue()
1509 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_default_queue()
1513 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) in nvme_tcp_read_queue() argument
1515 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue()
1516 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_read_queue()
1518 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_read_queue()
1519 !nvme_tcp_default_queue(queue) && in nvme_tcp_read_queue()
1524 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) in nvme_tcp_poll_queue() argument
1526 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue()
1527 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_poll_queue()
1529 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_poll_queue()
1530 !nvme_tcp_default_queue(queue) && in nvme_tcp_poll_queue()
1531 !nvme_tcp_read_queue(queue) && in nvme_tcp_poll_queue()
1537 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) in nvme_tcp_set_queue_io_cpu() argument
1539 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu()
1540 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_set_queue_io_cpu()
1543 if (nvme_tcp_default_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1545 else if (nvme_tcp_read_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1547 else if (nvme_tcp_poll_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1550 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_set_queue_io_cpu()
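Lines 1499-1550 classify each queue as admin, default (write), read, or poll based on its qid relative to the per-class queue counts, then pin its io_work to an online CPU chosen round-robin within the class. A toy version of the spreading:

#include <stdio.h>

int main(void)
{
        int online_cpus[] = { 0, 1, 2, 3 };
        int nr_online = 4;
        int nr_default_queues = 6;

        /* qid 0 is the admin queue; default queues start at qid 1 */
        for (int qid = 1; qid <= nr_default_queues; qid++) {
                int n = qid - 1;                   /* index within class */
                int io_cpu = online_cpus[n % nr_online];

                printf("queue %d -> io_cpu %d\n", qid, io_cpu);
        }
        return 0;
}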
1556 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue() local
1559 mutex_init(&queue->queue_lock); in nvme_tcp_alloc_queue()
1560 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1561 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1562 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1563 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1564 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1567 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1569 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1573 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1580 nvme_tcp_reclassify_socket(queue->sock); in nvme_tcp_alloc_queue()
1583 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1586 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1589 * Cleanup whatever is sitting in the TCP transmit queue on socket in nvme_tcp_alloc_queue()
1593 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1596 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1600 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1603 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1605 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1606 queue->sock->sk->sk_use_task_frag = false; in nvme_tcp_alloc_queue()
1607 nvme_tcp_set_queue_io_cpu(queue); in nvme_tcp_alloc_queue()
1608 queue->request = NULL; in nvme_tcp_alloc_queue()
1609 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1610 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1611 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1612 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1613 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1616 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1620 "failed to bind queue %d socket %d\n", in nvme_tcp_alloc_queue()
1630 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, in nvme_tcp_alloc_queue()
1634 "failed to bind to interface %s queue %d err %d\n", in nvme_tcp_alloc_queue()
1640 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1641 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1642 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1643 ret = nvme_tcp_alloc_crypto(queue); in nvme_tcp_alloc_queue()
1646 "failed to allocate queue %d crypto\n", qid); in nvme_tcp_alloc_queue()
1652 nvme_tcp_hdgst_len(queue); in nvme_tcp_alloc_queue()
1653 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1654 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1659 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1660 nvme_tcp_queue_id(queue)); in nvme_tcp_alloc_queue()
1662 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1670 ret = nvme_tcp_init_connection(queue); in nvme_tcp_alloc_queue()
1674 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1679 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1681 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1683 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1684 nvme_tcp_free_crypto(queue); in nvme_tcp_alloc_queue()
1686 sock_release(queue->sock); in nvme_tcp_alloc_queue()
1687 queue->sock = NULL; in nvme_tcp_alloc_queue()
1689 mutex_destroy(&queue->send_mutex); in nvme_tcp_alloc_queue()
1690 mutex_destroy(&queue->queue_lock); in nvme_tcp_alloc_queue()
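alloc_queue (lines 1553-1690) conditions the socket for low latency before connecting: a single SYN retry, TCP_NODELAY, no lingering on close (so an error-path teardown resets the connection rather than retransmitting stale data), optional priority/TOS, and GFP_ATOMIC allocation for softirq context. A userspace analogue of the option setting; the kernel uses in-kernel helpers such as tcp_sock_set_nodelay and sock_no_linger rather than setsockopt:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        int one = 1, syncnt = 1;
        struct linger lg = { .l_onoff = 1, .l_linger = 0 };

        /* send small PDUs immediately; fail fast on an unreachable target */
        setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
        setsockopt(fd, IPPROTO_TCP, TCP_SYNCNT, &syncnt, sizeof(syncnt));
        /* abortive close: drop queued data instead of retransmitting it */
        setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));

        printf("socket conditioned\n");
        close(fd);
        return 0;
}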
1694 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue) in nvme_tcp_restore_sock_ops() argument
1696 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_ops()
1700 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_ops()
1701 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_ops()
1702 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_ops()
1706 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) in __nvme_tcp_stop_queue() argument
1708 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1709 nvme_tcp_restore_sock_ops(queue); in __nvme_tcp_stop_queue()
1710 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1716 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue_nowait() local
1718 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_stop_queue_nowait()
1721 mutex_lock(&queue->queue_lock); in nvme_tcp_stop_queue_nowait()
1722 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue_nowait()
1723 __nvme_tcp_stop_queue(queue); in nvme_tcp_stop_queue_nowait()
1724 mutex_unlock(&queue->queue_lock); in nvme_tcp_stop_queue_nowait()
1730 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_wait_queue() local
1734 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) || in nvme_tcp_wait_queue()
1735 !sk_wmem_alloc_get(queue->sock->sk)) in nvme_tcp_wait_queue()
1752 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) in nvme_tcp_setup_sock_ops() argument
1754 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1755 queue->sock->sk->sk_user_data = queue; in nvme_tcp_setup_sock_ops()
1756 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_setup_sock_ops()
1757 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_setup_sock_ops()
1758 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_setup_sock_ops()
1759 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_setup_sock_ops()
1760 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_setup_sock_ops()
1761 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_setup_sock_ops()
1763 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_setup_sock_ops()
1765 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1771 struct nvme_tcp_queue *queue = &ctrl->queues[idx]; in nvme_tcp_start_queue() local
1774 queue->rd_enabled = true; in nvme_tcp_start_queue()
1775 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_start_queue()
1776 nvme_tcp_setup_sock_ops(queue); in nvme_tcp_start_queue()
1784 set_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_start_queue()
1786 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_start_queue()
1787 __nvme_tcp_stop_queue(queue); in nvme_tcp_start_queue()
1789 "failed to connect queue: %d ret=%d\n", idx, ret); in nvme_tcp_start_queue()
1933 * queue number might have changed. in nvme_tcp_configure_io_queues()
2283 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue, in nvme_tcp_set_sg_inline() argument
2288 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2307 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event() local
2310 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_submit_async_event()
2314 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2335 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out()
2337 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2344 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout()
2347 int qid = nvme_tcp_queue_id(req->queue); in nvme_tcp_timeout()
2350 "queue %d: timeout cid %#x type %d opcode %#x (%s)\n", in nvme_tcp_timeout()
2351 nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type, in nvme_tcp_timeout()
2380 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, in nvme_tcp_map_data() argument
2393 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2405 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_cmd_pdu() local
2406 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; in nvme_tcp_setup_cmd_pdu()
2432 if (queue->hdr_digest) in nvme_tcp_setup_cmd_pdu()
2434 if (queue->data_digest && req->pdu_len) { in nvme_tcp_setup_cmd_pdu()
2436 ddgst = nvme_tcp_ddgst_len(queue); in nvme_tcp_setup_cmd_pdu()
2443 ret = nvme_tcp_map_data(queue, rq); in nvme_tcp_setup_cmd_pdu()
2446 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_cmd_pdu()
2456 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_commit_rqs() local
2458 if (!llist_empty(&queue->req_list)) in nvme_tcp_commit_rqs()
2459 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_commit_rqs()
2465 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2466 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq() local
2469 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_queue_rq()
2472 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_tcp_queue_rq()
2473 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_tcp_queue_rq()
2495 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_poll() local
2496 struct sock *sk = queue->sock->sk; in nvme_tcp_poll()
2499 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_poll()
2502 set_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2505 ret = nvme_tcp_try_recv(queue); in nvme_tcp_poll()
2506 clear_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2507 return ret < 0 ? ret : queue->nr_cqe; in nvme_tcp_poll()
2512 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0]; in nvme_tcp_get_address() local
2518 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_get_address()
2521 mutex_lock(&queue->queue_lock); in nvme_tcp_get_address()
2523 ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); in nvme_tcp_get_address()
2531 mutex_unlock(&queue->queue_lock); in nvme_tcp_get_address()