// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic iSCSI Offload Driver
 * Copyright (c) 2016 Cavium Inc.
 */

#include <linux/blkdev.h>
#include <scsi/scsi_tcq.h>
#include <linux/delay.h>

#include "qedi.h"
#include "qedi_iscsi.h"
#include "qedi_gbl.h"
#include "qedi_fw_iscsi.h"
#include "qedi_fw_scsi.h"

static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
			       struct iscsi_task *mtask);

void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;

	if (cmd->io_tbl.sge_valid && sc) {
		cmd->io_tbl.sge_valid = 0;
		scsi_dma_unmap(sc);
	}
}

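/* Convert a firmware logout-response CQE into an iSCSI logout response
 * PDU, drop the command from the active list and complete it via libiscsi.
 */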
static void qedi_process_logout_resp(struct qedi_ctx *qedi,
				     union iscsi_cqe *cqe,
				     struct iscsi_task *task,
				     struct qedi_conn *qedi_conn)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_logout_rsp *resp_hdr;
	struct iscsi_session *session = conn->session;
	struct iscsi_logout_response_hdr *cqe_logout_response;
	struct qedi_cmd *cmd;

	cmd = (struct qedi_cmd *)task->dd_data;
	cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
	spin_lock(&session->back_lock);
	resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = cqe_logout_response->opcode;
	resp_hdr->flags = cqe_logout_response->flags;
	resp_hdr->hlength = 0;

	resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
	resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn);
	resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);

	resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
	resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
		  "Freeing tid=0x%x for cid=0x%x\n",
		  cmd->task_id, qedi_conn->iscsi_conn_id);

	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	} else {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
			  cmd->task_id, qedi_conn->iscsi_conn_id,
			  &cmd->io_cmd);
	}
	spin_unlock(&qedi_conn->list_lock);

	cmd->state = RESPONSE_RECEIVED;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);

	spin_unlock(&session->back_lock);
}

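/* Build an iSCSI text response PDU from the firmware CQE, record the
 * payload length in the generic PDU buffer and complete the task
 * through libiscsi.
 */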
static void qedi_process_text_resp(struct qedi_ctx *qedi,
				   union iscsi_cqe *cqe,
				   struct iscsi_task *task,
				   struct qedi_conn *qedi_conn)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct e4_iscsi_task_context *task_ctx;
	struct iscsi_text_rsp *resp_hdr_ptr;
	struct iscsi_text_response_hdr *cqe_text_response;
	struct qedi_cmd *cmd;
	int pld_len;

	cmd = (struct qedi_cmd *)task->dd_data;
	task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);

	cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
	spin_lock(&session->back_lock);
	resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr;
	memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr));
	resp_hdr_ptr->opcode = cqe_text_response->opcode;
	resp_hdr_ptr->flags = cqe_text_response->flags;
	resp_hdr_ptr->hlength = 0;

	hton24(resp_hdr_ptr->dlength,
	       (cqe_text_response->hdr_second_dword &
		ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));

	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
				      conn->session->age);
	resp_hdr_ptr->ttt = cqe_text_response->ttt;
	resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn);
	resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn);
	resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn);

	pld_len = cqe_text_response->hdr_second_dword &
		  ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK;
	qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;

	memset(task_ctx, '\0', sizeof(*task_ctx));

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
		  "Freeing tid=0x%x for cid=0x%x\n",
		  cmd->task_id, qedi_conn->iscsi_conn_id);

	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	} else {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n",
			  cmd->task_id, qedi_conn->iscsi_conn_id,
			  &cmd->io_cmd);
	}
	spin_unlock(&qedi_conn->list_lock);

	cmd->state = RESPONSE_RECEIVED;

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
			     qedi_conn->gen_pdu.resp_buf,
			     (qedi_conn->gen_pdu.resp_wr_ptr -
			      qedi_conn->gen_pdu.resp_buf));
	spin_unlock(&session->back_lock);
}

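/* Deferred work for LUN/target reset TMF responses: flush outstanding
 * I/O for the connection before completing the TMF PDU to libiscsi.
 */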
static void qedi_tmf_resp_work(struct work_struct *work)
{
	struct qedi_cmd *qedi_cmd =
		container_of(work, struct qedi_cmd, tmf_work);
	struct qedi_conn *qedi_conn = qedi_cmd->conn;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tm_rsp *resp_hdr_ptr;
	int rval = 0;

	set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
	resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;

	rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
	if (rval)
		goto exit_tmf_resp;

	spin_lock(&session->back_lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
	spin_unlock(&session->back_lock);

exit_tmf_resp:
	kfree(resp_hdr_ptr);
	clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
}

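/* Translate a firmware TMF-response CQE into an iSCSI TM response.
 * Reset-class TMFs are deferred to qedi_tmf_resp_work(); other TMF
 * responses are completed inline.
 */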
static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
				  union iscsi_cqe *cqe,
				  struct iscsi_task *task,
				  struct qedi_conn *qedi_conn)

{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tmf_response_hdr *cqe_tmp_response;
	struct iscsi_tm_rsp *resp_hdr_ptr;
	struct iscsi_tm *tmf_hdr;
	struct qedi_cmd *qedi_cmd = NULL;

	cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;

	qedi_cmd = task->dd_data;
	qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC);
	if (!qedi_cmd->tmf_resp_buf) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Failed to allocate resp buf, cid=0x%x\n",
			 qedi_conn->iscsi_conn_id);
		return;
	}

	spin_lock(&session->back_lock);
	resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf;
	memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp));

	/* Fill up the header */
	resp_hdr_ptr->opcode = cqe_tmp_response->opcode;
	resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags;
	resp_hdr_ptr->response = cqe_tmp_response->hdr_response;
	resp_hdr_ptr->hlength = 0;

	hton24(resp_hdr_ptr->dlength,
	       (cqe_tmp_response->hdr_second_dword &
		ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
				      conn->session->age);
	resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
	resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn);
	resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn);

	tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;

	spin_lock(&qedi_conn->list_lock);
	if (likely(qedi_cmd->io_cmd_in_list)) {
		qedi_cmd->io_cmd_in_list = false;
		list_del_init(&qedi_cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);

	if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
	      ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
	    ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
	      ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
	    ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
	      ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
		INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work);
		queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);
		goto unblock_sess;
	}

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
	kfree(resp_hdr_ptr);

unblock_sess:
	spin_unlock(&session->back_lock);
}

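/* Build an iSCSI login response PDU from the firmware CQE and hand it,
 * together with any login payload, to libiscsi.
 */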
static void qedi_process_login_resp(struct qedi_ctx *qedi,
				    union iscsi_cqe *cqe,
				    struct iscsi_task *task,
				    struct qedi_conn *qedi_conn)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct e4_iscsi_task_context *task_ctx;
	struct iscsi_login_rsp *resp_hdr_ptr;
	struct iscsi_login_response_hdr *cqe_login_response;
	struct qedi_cmd *cmd;
	int pld_len;

	cmd = (struct qedi_cmd *)task->dd_data;

	cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
	task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);

	spin_lock(&session->back_lock);
	resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr;
	memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp));
	resp_hdr_ptr->opcode = cqe_login_response->opcode;
	resp_hdr_ptr->flags = cqe_login_response->flags_attr;
	resp_hdr_ptr->hlength = 0;

	hton24(resp_hdr_ptr->dlength,
	       (cqe_login_response->hdr_second_dword &
		ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
				      conn->session->age);
	resp_hdr_ptr->tsih = cqe_login_response->tsih;
	resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn);
	resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn);
	resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn);
	resp_hdr_ptr->status_class = cqe_login_response->status_class;
	resp_hdr_ptr->status_detail = cqe_login_response->status_detail;
	pld_len = cqe_login_response->hdr_second_dword &
		  ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
	qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;

	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);

	memset(task_ctx, '\0', sizeof(*task_ctx));

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr,
			     qedi_conn->gen_pdu.resp_buf,
			     (qedi_conn->gen_pdu.resp_wr_ptr -
			      qedi_conn->gen_pdu.resp_buf));

	spin_unlock(&session->back_lock);
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
		  "Freeing tid=0x%x for cid=0x%x\n",
		  cmd->task_id, qedi_conn->iscsi_conn_id);
	cmd->state = RESPONSE_RECEIVED;
}

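/* Copy an unsolicited PDU payload out of the BDQ buffer that the firmware
 * selected via rqe_opaque.
 */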
static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
				struct iscsi_cqe_unsolicited *cqe,
				char *ptr, int len)
{
	u16 idx = 0;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n",
		  len, qedi->bdq_prod_idx,
		  (qedi->bdq_prod_idx % qedi->rq_num_entries));

	/* Obtain buffer address from rqe_opaque */
	idx = cqe->rqe_opaque;
	if (idx > (QEDI_BDQ_NUM - 1)) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
			  idx);
		return;
	}

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
	switch (cqe->unsol_cqe_type) {
	case ISCSI_CQE_UNSOLICITED_SINGLE:
	case ISCSI_CQE_UNSOLICITED_FIRST:
		if (len)
			memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len);
		break;
	case ISCSI_CQE_UNSOLICITED_MIDDLE:
	case ISCSI_CQE_UNSOLICITED_LAST:
		break;
	default:
		break;
	}
}

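/* Return consumed BDQ buffers to the firmware by rewriting the PBL entry
 * and advancing both producer indices.
 */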
static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
				struct iscsi_cqe_unsolicited *cqe,
				int count)
{
	u16 idx = 0;
	struct scsi_bd *pbl;

	/* Obtain buffer address from rqe_opaque */
	idx = cqe->rqe_opaque;
	if (idx > (QEDI_BDQ_NUM - 1)) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
			  idx);
		return;
	}

	pbl = (struct scsi_bd *)qedi->bdq_pbl;
	pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries);
	pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma));
	pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma));
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
		  pbl, pbl->address.hi, pbl->address.lo, idx);
	pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
	pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
	pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
	pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);

	/* Increment producer to let f/w know we've handled the frame */
	qedi->bdq_prod_idx += count;

	writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
	readw(qedi->bdq_primary_prod);

	writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
	readw(qedi->bdq_secondary_prod);
}

static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi,
				      struct iscsi_cqe_unsolicited *cqe,
				      u32 pdu_len, u32 num_bdqs,
				      char *bdq_data)
{
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "num_bdqs [%d]\n", num_bdqs);

	qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
	qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
}

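/* Handle NOP-In CQEs: target-initiated NOP-Ins arrive unsolicited and are
 * pulled from the BDQ, while responses to our own NOP-Outs complete the
 * originating command.
 */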
static int qedi_process_nopin_mesg(struct qedi_ctx *qedi,
				   union iscsi_cqe *cqe,
				   struct iscsi_task *task,
				   struct qedi_conn *qedi_conn, u16 que_idx)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_nop_in_hdr *cqe_nop_in;
	struct iscsi_nopin *hdr;
	struct qedi_cmd *cmd;
	int tgt_async_nop = 0;
	u32 lun[2];
	u32 pdu_len, num_bdqs;
	char bdq_data[QEDI_BDQ_BUF_SIZE];
	unsigned long flags;

	spin_lock_bh(&session->back_lock);
	cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;

	pdu_len = cqe_nop_in->hdr_second_dword &
		  ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK;
	num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;

	hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = cqe_nop_in->opcode;
	hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn);
	hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn);
	hdr->ttt = cpu_to_be32(cqe_nop_in->ttt);

	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
		spin_lock_irqsave(&qedi->hba_lock, flags);
		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
					  pdu_len, num_bdqs, bdq_data);
		hdr->itt = RESERVED_ITT;
		tgt_async_nop = 1;
		spin_unlock_irqrestore(&qedi->hba_lock, flags);
		goto done;
	}

	/* Response to one of our nop-outs */
	if (task) {
		cmd = task->dd_data;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
		hdr->itt = build_itt(cqe->cqe_solicited.itid,
				     conn->session->age);
		lun[0] = 0xffffffff;
		lun[1] = 0xffffffff;
		memcpy(&hdr->lun, lun, sizeof(struct scsi_lun));
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
			  "Freeing tid=0x%x for cid=0x%x\n",
			  cmd->task_id, qedi_conn->iscsi_conn_id);
		cmd->state = RESPONSE_RECEIVED;
		spin_lock(&qedi_conn->list_lock);
		if (likely(cmd->io_cmd_in_list)) {
			cmd->io_cmd_in_list = false;
			list_del_init(&cmd->io_cmd);
			qedi_conn->active_cmd_count--;
		}

		spin_unlock(&qedi_conn->list_lock);
	}

done:
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len);

	spin_unlock_bh(&session->back_lock);
	return tgt_async_nop;
}

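/* Handle an unsolicited iSCSI async message CQE and deliver it to
 * libiscsi as an async event PDU.
 */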
static void qedi_process_async_mesg(struct qedi_ctx *qedi,
				    union iscsi_cqe *cqe,
				    struct iscsi_task *task,
				    struct qedi_conn *qedi_conn,
				    u16 que_idx)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_async_msg_hdr *cqe_async_msg;
	struct iscsi_async *resp_hdr;
	u32 lun[2];
	u32 pdu_len, num_bdqs;
	char bdq_data[QEDI_BDQ_BUF_SIZE];
	unsigned long flags;

	spin_lock_bh(&session->back_lock);

	cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
	pdu_len = cqe_async_msg->hdr_second_dword &
		  ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK;
	num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE;

	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
		spin_lock_irqsave(&qedi->hba_lock, flags);
		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
					  pdu_len, num_bdqs, bdq_data);
		spin_unlock_irqrestore(&qedi->hba_lock, flags);
	}

	resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr;
	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
	resp_hdr->opcode = cqe_async_msg->opcode;
	resp_hdr->flags = 0x80;

	lun[0] = cpu_to_be32(cqe_async_msg->lun.lo);
	lun[1] = cpu_to_be32(cqe_async_msg->lun.hi);
	memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun));
	resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn);
	resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn);
	resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn);

	resp_hdr->async_event = cqe_async_msg->async_event;
	resp_hdr->async_vcode = cqe_async_msg->async_vcode;

	resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv);
	resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv);
	resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv);

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data,
			     pdu_len);

	spin_unlock_bh(&session->back_lock);
}

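/* Handle a reject PDU CQE, including any rejected-PDU payload carried in
 * the BDQ.
 */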
static void qedi_process_reject_mesg(struct qedi_ctx *qedi,
				     union iscsi_cqe *cqe,
				     struct iscsi_task *task,
				     struct qedi_conn *qedi_conn,
				     uint16_t que_idx)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_reject_hdr *cqe_reject;
	struct iscsi_reject *hdr;
	u32 pld_len, num_bdqs;
	unsigned long flags;

	spin_lock_bh(&session->back_lock);
	cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
	pld_len = cqe_reject->hdr_second_dword &
		  ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK;
	num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE;

	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
		spin_lock_irqsave(&qedi->hba_lock, flags);
		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
					  pld_len, num_bdqs, conn->data);
		spin_unlock_irqrestore(&qedi->hba_lock, flags);
	}
	hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr;
	memset(hdr, 0, sizeof(struct iscsi_hdr));
	hdr->opcode = cqe_reject->opcode;
	hdr->reason = cqe_reject->hdr_reason;
	hdr->flags = cqe_reject->hdr_flags;
	hton24(hdr->dlength, (cqe_reject->hdr_second_dword &
			      ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK));
	hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn);
	hdr->statsn = cpu_to_be32(cqe_reject->stat_sn);
	hdr->ffffffff = cpu_to_be32(0xffffffff);

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
			     conn->data, pld_len);
	spin_unlock_bh(&session->back_lock);
}

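/* Complete a SCSI command: unmap its SG list, build the SCSI response PDU
 * (including sense data and underrun fixups) and finish it through
 * libiscsi.
 */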
static void qedi_scsi_completion(struct qedi_ctx *qedi,
				 union iscsi_cqe *cqe,
				 struct iscsi_task *task,
				 struct iscsi_conn *conn)
{
	struct scsi_cmnd *sc_cmd;
	struct qedi_cmd *cmd = task->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_scsi_rsp *hdr;
	struct iscsi_data_in_hdr *cqe_data_in;
	int datalen = 0;
	struct qedi_conn *qedi_conn;
	u32 iscsi_cid;
	u8 cqe_err_bits = 0;

	iscsi_cid = cqe->cqe_common.conn_id;
	qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];

	cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
	cqe_err_bits =
		cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;

	spin_lock_bh(&session->back_lock);
	/* get the scsi command */
	sc_cmd = cmd->scsi_cmd;

	if (!sc_cmd) {
		QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n");
		goto error;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "SCp.ptr is NULL, returned in another context.\n");
		goto error;
	}

	if (!sc_cmd->request) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "sc_cmd->request is NULL, sc_cmd=%p.\n",
			  sc_cmd);
		goto error;
	}

	if (!sc_cmd->request->q) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "request->q is NULL so request is not valid, sc_cmd=%p.\n",
			  sc_cmd);
		goto error;
	}

	qedi_iscsi_unmap_sg_list(cmd);

	hdr = (struct iscsi_scsi_rsp *)task->hdr;
	hdr->opcode = cqe_data_in->opcode;
	hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn);
	hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn);
	hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
	hdr->response = cqe_data_in->reserved1;
	hdr->cmd_status = cqe_data_in->status_rsvd;
	hdr->flags = cqe_data_in->flags;
	hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count);

	if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
		datalen = cqe_data_in->reserved2 &
			  ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK;
		memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen);
	}

	/* If f/w reports data underrun err then set residual to IO transfer
	 * length, set Underrun flag and clear Overrun flag explicitly
	 */
	if (unlikely(cqe_err_bits &&
		     GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n",
			  hdr->itt, cqe_data_in->flags, cmd->task_id,
			  qedi_conn->iscsi_conn_id, hdr->residual_count,
			  scsi_bufflen(sc_cmd));
		hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd));
		hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
		hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW);
	}

	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
		  "Freeing tid=0x%x for cid=0x%x\n",
		  cmd->task_id, qedi_conn->iscsi_conn_id);
	cmd->state = RESPONSE_RECEIVED;
	if (qedi_io_tracing)
		qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP);

	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
			     conn->data, datalen);
error:
	spin_unlock_bh(&session->back_lock);
}

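/* Dispatch a solicited middle-path CQE to the handler matching its iSCSI
 * opcode.
 */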
static void qedi_mtask_completion(struct qedi_ctx *qedi,
				  union iscsi_cqe *cqe,
				  struct iscsi_task *task,
				  struct qedi_conn *conn, uint16_t que_idx)
{
	struct iscsi_conn *iscsi_conn;
	u32 hdr_opcode;

	hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
	iscsi_conn = conn->cls_conn->dd_data;

	switch (hdr_opcode) {
	case ISCSI_OPCODE_SCSI_RESPONSE:
	case ISCSI_OPCODE_DATA_IN:
		qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
		break;
	case ISCSI_OPCODE_LOGIN_RESPONSE:
		qedi_process_login_resp(qedi, cqe, task, conn);
		break;
	case ISCSI_OPCODE_TMF_RESPONSE:
		qedi_process_tmf_resp(qedi, cqe, task, conn);
		break;
	case ISCSI_OPCODE_TEXT_RESPONSE:
		qedi_process_text_resp(qedi, cqe, task, conn);
		break;
	case ISCSI_OPCODE_LOGOUT_RESPONSE:
		qedi_process_logout_resp(qedi, cqe, task, conn);
		break;
	case ISCSI_OPCODE_NOP_IN:
		qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
		break;
	default:
		QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n");
	}
}

static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi,
					  struct iscsi_cqe_solicited *cqe,
					  struct iscsi_task *task,
					  struct qedi_conn *qedi_conn)
{
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct qedi_cmd *cmd = task->dd_data;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL,
		  "itid=0x%x, cmd task id=0x%x\n",
		  cqe->itid, cmd->task_id);

	cmd->state = RESPONSE_RECEIVED;

	spin_lock_bh(&session->back_lock);
	__iscsi_put_task(task);
	spin_unlock_bh(&session->back_lock);
}

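/* Handle a task-cleanup CQE: either finish an outstanding abort TMF that
 * requested the cleanup, or account for a cleanup issued while flushing
 * all I/O on the connection.
 */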
static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
					  struct iscsi_cqe_solicited *cqe,
					  struct iscsi_task *task,
					  struct iscsi_conn *conn)
{
	struct qedi_work_map *work, *work_tmp;
	u32 proto_itt = cqe->itid;
	u32 ptmp_itt = 0;
	itt_t protoitt = 0;
	int found = 0;
	struct qedi_cmd *qedi_cmd = NULL;
	u32 iscsi_cid;
	struct qedi_conn *qedi_conn;
	struct qedi_cmd *dbg_cmd;
	struct iscsi_task *mtask;
	struct iscsi_tm *tmf_hdr = NULL;

	iscsi_cid = cqe->conn_id;
	qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	if (!qedi_conn) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "icid not found 0x%x\n", cqe->conn_id);
		return;
	}

	/* Based on this itt get the corresponding qedi_cmd */
	spin_lock_bh(&qedi_conn->tmf_work_lock);
	list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list,
				 list) {
		if (work->rtid == proto_itt) {
			/* We found the command */
			qedi_cmd = work->qedi_cmd;
			if (!qedi_cmd->list_tmf_work) {
				QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
					  "TMF work not found, cqe->tid=0x%x, cid=0x%x\n",
					  proto_itt, qedi_conn->iscsi_conn_id);
				WARN_ON(1);
			}
			found = 1;
			mtask = qedi_cmd->task;
			tmf_hdr = (struct iscsi_tm *)mtask->hdr;

			list_del_init(&work->list);
			kfree(work);
			qedi_cmd->list_tmf_work = NULL;
		}
	}
	spin_unlock_bh(&qedi_conn->tmf_work_lock);

	if (found) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
			  "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
			  proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id);

		if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
		    ISCSI_TM_FUNC_ABORT_TASK) {
			spin_lock_bh(&conn->session->back_lock);

			protoitt = build_itt(get_itt(tmf_hdr->rtt),
					     conn->session->age);
			task = iscsi_itt_to_task(conn, protoitt);

			spin_unlock_bh(&conn->session->back_lock);

			if (!task) {
				QEDI_NOTICE(&qedi->dbg_ctx,
					    "IO task completed, tmf rtt=0x%x, cid=0x%x\n",
					    get_itt(tmf_hdr->rtt),
					    qedi_conn->iscsi_conn_id);
				return;
			}

			dbg_cmd = task->dd_data;

			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
				  "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n",
				  get_itt(tmf_hdr->rtt), get_itt(task->itt),
				  dbg_cmd->task_id, qedi_conn->iscsi_conn_id);

			if (qedi_cmd->state == CLEANUP_WAIT_FAILED)
				qedi_cmd->state = CLEANUP_RECV;

			spin_lock(&qedi_conn->list_lock);
			if (likely(dbg_cmd->io_cmd_in_list)) {
				dbg_cmd->io_cmd_in_list = false;
				list_del_init(&dbg_cmd->io_cmd);
				qedi_conn->active_cmd_count--;
			}
			spin_unlock(&qedi_conn->list_lock);
			qedi_cmd->state = CLEANUP_RECV;
			wake_up_interruptible(&qedi_conn->wait_queue);
		}
	} else if (qedi_conn->cmd_cleanup_req > 0) {
		spin_lock_bh(&conn->session->back_lock);
		qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
		protoitt = build_itt(ptmp_itt, conn->session->age);
		task = iscsi_itt_to_task(conn, protoitt);
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
			  "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n",
			  cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
			  qedi_conn->iscsi_conn_id);

		spin_unlock_bh(&conn->session->back_lock);
		if (!task) {
			QEDI_NOTICE(&qedi->dbg_ctx,
				    "task is null, itid=0x%x, cid=0x%x\n",
				    cqe->itid, qedi_conn->iscsi_conn_id);
			return;
		}
		qedi_conn->cmd_cleanup_cmpl++;
		wake_up(&qedi_conn->wait_queue);

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
			  "Freeing tid=0x%x for cid=0x%x\n",
			  cqe->itid, qedi_conn->iscsi_conn_id);

	} else {
		qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
		protoitt = build_itt(ptmp_itt, conn->session->age);
		task = iscsi_itt_to_task(conn, protoitt);
		QEDI_ERR(&qedi->dbg_ctx,
			 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n",
			 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
	}
}

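/* Fast-path CQE dispatcher: validates the CQE, looks up the connection and
 * routes solicited, unsolicited, dummy and task-cleanup completions.
 */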
void qedi_fp_process_cqes(struct qedi_work *work)
{
	struct qedi_ctx *qedi = work->qedi;
	union iscsi_cqe *cqe = &work->cqe;
	struct iscsi_task *task = NULL;
	struct iscsi_nopout *nopout_hdr;
	struct qedi_conn *q_conn;
	struct iscsi_conn *conn;
	struct qedi_cmd *qedi_cmd;
	u32 comp_type;
	u32 iscsi_cid;
	u32 hdr_opcode;
	u16 que_idx = work->que_idx;
	u8 cqe_err_bits = 0;

	comp_type = cqe->cqe_common.cqe_type;
	hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
	cqe_err_bits =
		cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
		  "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n",
		  cqe->cqe_common.conn_id, comp_type, hdr_opcode);

	if (comp_type >= MAX_ISCSI_CQES_TYPE) {
		QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n");
		return;
	}

	iscsi_cid = cqe->cqe_common.conn_id;
	q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	if (!q_conn) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Session no longer exists for cid=0x%x!!\n",
			  iscsi_cid);
		return;
	}

	conn = q_conn->cls_conn->dd_data;

	if (unlikely(cqe_err_bits &&
		     GET_FIELD(cqe_err_bits,
			       CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) {
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return;
	}

	switch (comp_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
	case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
		qedi_cmd = container_of(work, struct qedi_cmd, cqe_work);
		task = qedi_cmd->task;
		if (!task) {
			QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n");
			return;
		}

		/* Process NOPIN local completion */
		nopout_hdr = (struct iscsi_nopout *)task->hdr;
		if ((nopout_hdr->itt == RESERVED_ITT) &&
		    (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
			qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
						      task, q_conn);
		} else {
			cqe->cqe_solicited.itid =
				qedi_get_itt(cqe->cqe_solicited);
			/* Process other solicited responses */
			qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
		}
		break;
	case ISCSI_CQE_TYPE_UNSOLICITED:
		switch (hdr_opcode) {
		case ISCSI_OPCODE_NOP_IN:
			qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
						que_idx);
			break;
		case ISCSI_OPCODE_ASYNC_MSG:
			qedi_process_async_mesg(qedi, cqe, task, q_conn,
						que_idx);
			break;
		case ISCSI_OPCODE_REJECT:
			qedi_process_reject_mesg(qedi, cqe, task, q_conn,
						 que_idx);
			break;
		}
		goto exit_fp_process;
	case ISCSI_CQE_TYPE_DUMMY:
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n");
		goto exit_fp_process;
	case ISCSI_CQE_TYPE_TASK_CLEANUP:
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n");
		qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
					      conn);
		goto exit_fp_process;
	default:
		QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n");
		break;
	}

exit_fp_process:
	return;
}

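/* Ring the connection's SQ doorbell so the firmware picks up newly posted
 * WQEs.
 */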
static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
{
	struct iscsi_db_data dbell = { 0 };

	dbell.agg_flags = 0;

	dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT;
	dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT;
	dbell.params |=
		DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT;

	dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx;
	writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell);

	/* Make sure fw write idx is coherent, and include both memory barriers
	 * as a failsafe as for some architectures the call is the same but on
	 * others they are two different assembly operations.
	 */
	wmb();
	QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ,
		  "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n",
		  qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx,
		  qedi_conn->iscsi_conn_id);
}

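/* Reserve the next SQ slot and advance the (wrapping) producer indices. */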
static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
{
	struct qedi_endpoint *ep;
	u16 rval;

	ep = qedi_conn->ep;
	rval = ep->sq_prod_idx;

	/* Increment SQ index */
	ep->sq_prod_idx++;
	ep->fw_sq_prod_idx++;
	if (ep->sq_prod_idx == QEDI_SQ_SIZE)
		ep->sq_prod_idx = 0;

	return rval;
}

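/* Post an iSCSI login request: build the PDU header and the transmit and
 * receive SGLs, initialize the firmware task context and ring the doorbell.
 */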
int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
			  struct iscsi_task *task)
{
	struct iscsi_login_req_hdr login_req_pdu_header;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;
	struct iscsi_task_params task_params;
	struct e4_iscsi_task_context *fw_task_ctx;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct iscsi_login_req *login_hdr;
	struct scsi_sge *resp_sge = NULL;
	struct qedi_cmd *qedi_cmd;
	struct qedi_endpoint *ep;
	s16 tid = 0;
	u16 sq_idx = 0;
	int rval = 0;

	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
	qedi_cmd = (struct qedi_cmd *)task->dd_data;
	ep = qedi_conn->ep;
	login_hdr = (struct iscsi_login_req *)task->hdr;

	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	fw_task_ctx =
	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
								tid);
	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));

	qedi_cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
	/* Update header info */
	login_req_pdu_header.opcode = login_hdr->opcode;
	login_req_pdu_header.version_min = login_hdr->min_version;
	login_req_pdu_header.version_max = login_hdr->max_version;
	login_req_pdu_header.flags_attr = login_hdr->flags;
	login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
	login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);

	login_req_pdu_header.tsih = login_hdr->tsih;
	login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);

	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
	login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
	login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
	login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
	login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
	login_req_pdu_header.exp_stat_sn = 0;

	/* Fill tx AHS and rx buffer */
	tx_sgl_task_params.sgl =
			       (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
	tx_sgl_task_params.sgl_phys_addr.lo =
					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
	tx_sgl_task_params.sgl_phys_addr.hi =
			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
	tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
	tx_sgl_task_params.num_sges = 1;

	rx_sgl_task_params.sgl =
			      (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
	rx_sgl_task_params.sgl_phys_addr.lo =
					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
	rx_sgl_task_params.sgl_phys_addr.hi =
			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
	rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
	rx_sgl_task_params.num_sges = 1;

	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = 0;
	task_params.tx_io_size = ntoh24(login_hdr->dlength);
	task_params.rx_io_size = resp_sge->sge_len;

	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];

	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
	rval = init_initiator_login_request_task(&task_params,
						 &login_req_pdu_header,
						 &tx_sgl_task_params,
						 &rx_sgl_task_params);
	if (rval)
		return -1;

	spin_lock(&qedi_conn->list_lock);
	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
	qedi_cmd->io_cmd_in_list = true;
	qedi_conn->active_cmd_count++;
	spin_unlock(&qedi_conn->list_lock);

	qedi_ring_doorbell(qedi_conn);
	return 0;
}

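/* Post an iSCSI logout request to the firmware for this connection. */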
int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
			   struct iscsi_task *task)
{
	struct iscsi_logout_req_hdr logout_pdu_header;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;
	struct iscsi_task_params task_params;
	struct e4_iscsi_task_context *fw_task_ctx;
	struct iscsi_logout *logout_hdr = NULL;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct qedi_cmd *qedi_cmd;
	struct qedi_endpoint *ep;
	s16 tid = 0;
	u16 sq_idx = 0;
	int rval = 0;

	qedi_cmd = (struct qedi_cmd *)task->dd_data;
	logout_hdr = (struct iscsi_logout *)task->hdr;
	ep = qedi_conn->ep;

	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	fw_task_ctx =
	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
								tid);
	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));

	qedi_cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));

	/* Update header info */
	logout_pdu_header.opcode = logout_hdr->opcode;
	logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
	logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
	logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
	logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
	logout_pdu_header.cid = qedi_conn->iscsi_conn_id;

	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = 0;
	task_params.tx_io_size = 0;
	task_params.rx_io_size = 0;

	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];
	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));

	rval = init_initiator_logout_request_task(&task_params,
						  &logout_pdu_header,
						  NULL, NULL);
	if (rval)
		return -1;

	spin_lock(&qedi_conn->list_lock);
	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
	qedi_cmd->io_cmd_in_list = true;
	qedi_conn->active_cmd_count++;
	spin_unlock(&qedi_conn->list_lock);

	qedi_ring_doorbell(qedi_conn);
	return 0;
}

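/* Issue cleanup for every active command on the connection (optionally
 * restricted to one LUN for a LUN reset) and wait for the firmware to
 * acknowledge each one, escalating to a driver drain on timeout.
 */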
int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
			struct iscsi_task *task, bool in_recovery)
{
	int rval;
	struct iscsi_task *ctask;
	struct qedi_cmd *cmd, *cmd_tmp;
	struct iscsi_tm *tmf_hdr;
	unsigned int lun = 0;
	bool lun_reset = false;
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_session *session = conn->session;

	/* From recovery, task is NULL or from tmf resp valid task */
	if (task) {
		tmf_hdr = (struct iscsi_tm *)task->hdr;

		if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
			ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) {
			lun_reset = true;
			lun = scsilun_to_int(&tmf_hdr->lun);
		}
	}

	qedi_conn->cmd_cleanup_req = 0;
	qedi_conn->cmd_cleanup_cmpl = 0;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
		  qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id,
		  in_recovery, lun_reset);

	if (lun_reset)
		spin_lock_bh(&session->back_lock);

	spin_lock(&qedi_conn->list_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
				 io_cmd) {
		ctask = cmd->task;
		if (ctask == task)
			continue;

		if (lun_reset) {
			if (cmd->scsi_cmd && cmd->scsi_cmd->device) {
				QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
					  "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n",
					  cmd->task_id, get_itt(ctask->itt),
					  cmd->scsi_cmd, cmd->scsi_cmd->device,
					  ctask->state, cmd->state,
					  qedi_conn->iscsi_conn_id);
				if (cmd->scsi_cmd->device->lun != lun)
					continue;
			}
		}
		qedi_conn->cmd_cleanup_req++;
		qedi_iscsi_cleanup_task(ctask, true);

		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
		QEDI_WARN(&qedi->dbg_ctx,
			  "Deleted active cmd list node io_cmd=%p, cid=0x%x\n",
			  &cmd->io_cmd, qedi_conn->iscsi_conn_id);
	}

	spin_unlock(&qedi_conn->list_lock);

	if (lun_reset)
		spin_unlock_bh(&session->back_lock);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "cmd_cleanup_req=%d, cid=0x%x\n",
		  qedi_conn->cmd_cleanup_req,
		  qedi_conn->iscsi_conn_id);

	rval  = wait_event_interruptible_timeout(qedi_conn->wait_queue,
						 ((qedi_conn->cmd_cleanup_req ==
						  qedi_conn->cmd_cleanup_cmpl) ||
						  test_bit(QEDI_IN_RECOVERY,
							   &qedi->flags)),
						 5 * HZ);
	if (rval) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
			  "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
			  qedi_conn->cmd_cleanup_req,
			  qedi_conn->cmd_cleanup_cmpl,
			  qedi_conn->iscsi_conn_id);

		return 0;
	}

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
		  qedi_conn->cmd_cleanup_req,
		  qedi_conn->cmd_cleanup_cmpl,
		  qedi_conn->iscsi_conn_id);

	iscsi_host_for_each_session(qedi->shost,
				    qedi_mark_device_missing);
	qedi_ops->common->drain(qedi->cdev);

	/* Enable IOs for all other sessions except current.*/
	if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
					      (qedi_conn->cmd_cleanup_req ==
					       qedi_conn->cmd_cleanup_cmpl) ||
					       test_bit(QEDI_IN_RECOVERY,
							&qedi->flags),
					      5 * HZ)) {
		iscsi_host_for_each_session(qedi->shost,
					    qedi_mark_device_available);
		return -1;
	}

	iscsi_host_for_each_session(qedi->shost,
				    qedi_mark_device_available);

	return 0;
}

void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
		  struct iscsi_task *task)
{
	struct qedi_endpoint *qedi_ep;
	int rval;

	qedi_ep = qedi_conn->ep;
	qedi_conn->cmd_cleanup_req = 0;
	qedi_conn->cmd_cleanup_cmpl = 0;

	if (!qedi_ep) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Cannot proceed, ep already disconnected, cid=0x%x\n",
			  qedi_conn->iscsi_conn_id);
		return;
	}

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n",
		  qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep);

	qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle);

	rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true);
	if (rval) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "fatal error, need hard reset, cid=0x%x\n",
			 qedi_conn->iscsi_conn_id);
		WARN_ON(1);
	}
}

static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi,
					 struct qedi_conn *qedi_conn,
					 struct iscsi_task *task,
					 struct qedi_cmd *qedi_cmd,
					 struct qedi_work_map *list_work)
{
	struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data;
	int wait;

	wait  = wait_event_interruptible_timeout(qedi_conn->wait_queue,
						 ((qedi_cmd->state ==
						   CLEANUP_RECV) ||
						 ((qedi_cmd->type == TYPEIO) &&
						  (cmd->state ==
						   RESPONSE_RECEIVED))),
						 5 * HZ);
	if (!wait) {
		qedi_cmd->state = CLEANUP_WAIT_FAILED;

		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
			  "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n",
			  cmd->task_id, qedi_conn->iscsi_conn_id);

		return -1;
	}
	return 0;
}

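/* Deferred abort-task handling: request firmware cleanup of the aborted
 * command, wait for it to complete and then send the TMF PDU itself.
 */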
static void qedi_tmf_work(struct work_struct *work)
{
	struct qedi_cmd *qedi_cmd =
		container_of(work, struct qedi_cmd, tmf_work);
	struct qedi_conn *qedi_conn = qedi_cmd->conn;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct qedi_work_map *list_work = NULL;
	struct iscsi_task *mtask;
	struct qedi_cmd *cmd;
	struct iscsi_task *ctask;
	struct iscsi_tm *tmf_hdr;
	s16 rval = 0;
	s16 tid = 0;

	mtask = qedi_cmd->task;
	tmf_hdr = (struct iscsi_tm *)mtask->hdr;
	set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);

	ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
	if (!ctask || !ctask->sc) {
		QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n");
		goto abort_ret;
	}

	cmd = (struct qedi_cmd *)ctask->dd_data;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n",
		  get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
		  qedi_conn->iscsi_conn_id);

	if (qedi_do_not_recover) {
		QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
			 qedi_do_not_recover);
		goto abort_ret;
	}

	list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
	if (!list_work) {
		QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
		goto abort_ret;
	}

	qedi_cmd->type = TYPEIO;
	list_work->qedi_cmd = qedi_cmd;
	list_work->rtid = cmd->task_id;
	list_work->state = QEDI_WORK_SCHEDULED;
	qedi_cmd->list_tmf_work = list_work;

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
		  "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n",
		  list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id,
		  tmf_hdr->flags);

	spin_lock_bh(&qedi_conn->tmf_work_lock);
	list_add_tail(&list_work->list, &qedi_conn->tmf_work_list);
	spin_unlock_bh(&qedi_conn->tmf_work_lock);

	qedi_iscsi_cleanup_task(ctask, false);

	rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd,
					     list_work);
	if (rval == -1) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
			  "FW cleanup got escalated, cid=0x%x\n",
			  qedi_conn->iscsi_conn_id);
		goto ldel_exit;
	}

	tid = qedi_get_task_idx(qedi);
	if (tid == -1) {
		QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
			 qedi_conn->iscsi_conn_id);
		goto ldel_exit;
	}

	qedi_cmd->task_id = tid;
	qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);

abort_ret:
	clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
	return;

ldel_exit:
	spin_lock_bh(&qedi_conn->tmf_work_lock);
	if (qedi_cmd->list_tmf_work) {
		list_del_init(&list_work->list);
		qedi_cmd->list_tmf_work = NULL;
		kfree(list_work);
	}
	spin_unlock_bh(&qedi_conn->tmf_work_lock);

	spin_lock(&qedi_conn->list_lock);
	if (likely(cmd->io_cmd_in_list)) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		qedi_conn->active_cmd_count--;
	}
	spin_unlock(&qedi_conn->list_lock);

	clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
}

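/* Build and post a task-management function request WQE to the firmware. */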
static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
			       struct iscsi_task *mtask)
{
	struct iscsi_tmf_request_hdr tmf_pdu_header;
	struct iscsi_task_params task_params;
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct e4_iscsi_task_context *fw_task_ctx;
	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
	struct iscsi_task *ctask;
	struct iscsi_tm *tmf_hdr;
	struct qedi_cmd *qedi_cmd;
	struct qedi_cmd *cmd;
	struct qedi_endpoint *ep;
	u32 scsi_lun[2];
	s16 tid = 0;
	u16 sq_idx = 0;
	int rval = 0;

	tmf_hdr = (struct iscsi_tm *)mtask->hdr;
	qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
	ep = qedi_conn->ep;
	if (!ep)
		return -ENODEV;

	tid = qedi_get_task_idx(qedi);
	if (tid == -1)
		return -ENOMEM;

	fw_task_ctx =
	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
								tid);
	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));

	qedi_cmd->task_id = tid;

	memset(&task_params, 0, sizeof(task_params));
	memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));

	/* Update header info */
	qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
	tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
	tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);

	memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
	tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
	tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);

	if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
	     ISCSI_TM_FUNC_ABORT_TASK) {
		ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt);
		if (!ctask || !ctask->sc) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Could not get reference task\n");
			return 0;
		}
		cmd = (struct qedi_cmd *)ctask->dd_data;
		tmf_pdu_header.rtt =
				qedi_set_itt(cmd->task_id,
					     get_itt(tmf_hdr->rtt));
	} else {
		tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
	}

	tmf_pdu_header.opcode = tmf_hdr->opcode;
	tmf_pdu_header.function = tmf_hdr->flags;
	tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
	tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);

	/* Fill fw input params */
	task_params.context = fw_task_ctx;
	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
	task_params.itid = tid;
	task_params.cq_rss_number = 0;
	task_params.tx_io_size = 0;
	task_params.rx_io_size = 0;

	sq_idx = qedi_get_wqe_idx(qedi_conn);
	task_params.sqe = &ep->sq[sq_idx];

	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
	rval = init_initiator_tmf_request_task(&task_params,
					       &tmf_pdu_header);
	if (rval)
		return -1;

	spin_lock(&qedi_conn->list_lock);
	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
	qedi_cmd->io_cmd_in_list = true;
	qedi_conn->active_cmd_count++;
	spin_unlock(&qedi_conn->list_lock);

	qedi_ring_doorbell(qedi_conn);
	return 0;
}

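/* Entry point for TMF PDUs from libiscsi: abort-task is deferred to
 * qedi_tmf_work(), reset-class TMFs are posted directly.
 */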
int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
			  struct iscsi_task *mtask)
{
	struct qedi_ctx *qedi = qedi_conn->qedi;
	struct iscsi_tm *tmf_hdr;
	struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
	s16 tid = 0;

	tmf_hdr = (struct iscsi_tm *)mtask->hdr;
	qedi_cmd->task = mtask;

	/* If abort task then schedule the work and return */
	if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
	    ISCSI_TM_FUNC_ABORT_TASK) {
		qedi_cmd->state = CLEANUP_WAIT;
		INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work);
		queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work);

	} else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
		    ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
		   ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
		    ISCSI_TM_FUNC_TARGET_WARM_RESET) ||
		   ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
		    ISCSI_TM_FUNC_TARGET_COLD_RESET)) {
		tid = qedi_get_task_idx(qedi);
		if (tid == -1) {
			QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n",
				 qedi_conn->iscsi_conn_id);
			return -1;
		}
		qedi_cmd->task_id = tid;

		qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task);

	} else {
		QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n",
			 qedi_conn->iscsi_conn_id);
		return -1;
	}

	return 0;
}

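/* Post an iSCSI text request with its transmit and receive SGLs. */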
qedi_send_iscsi_text(struct qedi_conn * qedi_conn,struct iscsi_task * task)1592 int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
1593 struct iscsi_task *task)
1594 {
1595 struct iscsi_text_request_hdr text_request_pdu_header;
1596 struct scsi_sgl_task_params tx_sgl_task_params;
1597 struct scsi_sgl_task_params rx_sgl_task_params;
1598 struct iscsi_task_params task_params;
1599 struct e4_iscsi_task_context *fw_task_ctx;
1600 struct qedi_ctx *qedi = qedi_conn->qedi;
1601 struct iscsi_text *text_hdr;
1602 struct scsi_sge *req_sge = NULL;
1603 struct scsi_sge *resp_sge = NULL;
1604 struct qedi_cmd *qedi_cmd;
1605 struct qedi_endpoint *ep;
1606 s16 tid = 0;
1607 u16 sq_idx = 0;
1608 int rval = 0;
1609
1610 req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1611 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1612 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1613 text_hdr = (struct iscsi_text *)task->hdr;
1614 ep = qedi_conn->ep;
1615
1616 tid = qedi_get_task_idx(qedi);
1617 if (tid == -1)
1618 return -ENOMEM;
1619
1620 fw_task_ctx =
1621 (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
1622 tid);
1623 memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
1624
1625 qedi_cmd->task_id = tid;
1626
1627 memset(&task_params, 0, sizeof(task_params));
1628 memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
1629 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
1630 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
1631
1632 /* Update header info */
1633 text_request_pdu_header.opcode = text_hdr->opcode;
1634 text_request_pdu_header.flags_attr = text_hdr->flags;
1635
1636 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1637 text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
1638 text_request_pdu_header.ttt = text_hdr->ttt;
1639 text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
1640 text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
1641 text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);
1642
1643 /* Fill tx AHS and rx buffer */
1644 tx_sgl_task_params.sgl =
1645 (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1646 tx_sgl_task_params.sgl_phys_addr.lo =
1647 (u32)(qedi_conn->gen_pdu.req_dma_addr);
1648 tx_sgl_task_params.sgl_phys_addr.hi =
1649 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1650 tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
1651 tx_sgl_task_params.num_sges = 1;
1652
1653 rx_sgl_task_params.sgl =
1654 (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1655 rx_sgl_task_params.sgl_phys_addr.lo =
1656 (u32)(qedi_conn->gen_pdu.resp_dma_addr);
1657 rx_sgl_task_params.sgl_phys_addr.hi =
1658 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
1659 rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
1660 rx_sgl_task_params.num_sges = 1;
1661
1662 /* Fill fw input params */
1663 task_params.context = fw_task_ctx;
1664 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
1665 task_params.itid = tid;
1666 task_params.cq_rss_number = 0;
1667 task_params.tx_io_size = ntoh24(text_hdr->dlength);
1668 task_params.rx_io_size = resp_sge->sge_len;
1669
1670 sq_idx = qedi_get_wqe_idx(qedi_conn);
1671 task_params.sqe = &ep->sq[sq_idx];
1672
1673 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1674 rval = init_initiator_text_request_task(&task_params,
1675 &text_request_pdu_header,
1676 &tx_sgl_task_params,
1677 &rx_sgl_task_params);
1678 if (rval)
1679 return -1;
1680
1681 spin_lock(&qedi_conn->list_lock);
1682 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1683 qedi_cmd->io_cmd_in_list = true;
1684 qedi_conn->active_cmd_count++;
1685 spin_unlock(&qedi_conn->list_lock);
1686
1687 qedi_ring_doorbell(qedi_conn);
1688 return 0;
1689 }
1690
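/*
 * qedi_send_iscsi_nopout - post a NOP-Out PDU to the firmware.
 *
 * A host-initiated ping (TTT of all ones) is assigned a driver ITT and
 * tracked on the active command list so the matching NOP-In response can
 * complete it; a reply to a target NOP-In reuses the target's ITT/TTT and
 * is not tracked.
 */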
1691 int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
1692 struct iscsi_task *task,
1693 char *datap, int data_len, int unsol)
1694 {
1695 struct iscsi_nop_out_hdr nop_out_pdu_header;
1696 struct scsi_sgl_task_params tx_sgl_task_params;
1697 struct scsi_sgl_task_params rx_sgl_task_params;
1698 struct iscsi_task_params task_params;
1699 struct qedi_ctx *qedi = qedi_conn->qedi;
1700 struct e4_iscsi_task_context *fw_task_ctx;
1701 struct iscsi_nopout *nopout_hdr;
1702 struct scsi_sge *resp_sge = NULL;
1703 struct qedi_cmd *qedi_cmd;
1704 struct qedi_endpoint *ep;
1705 u32 scsi_lun[2];
1706 s16 tid = 0;
1707 u16 sq_idx = 0;
1708 int rval = 0;
1709
1710 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1711 qedi_cmd = (struct qedi_cmd *)task->dd_data;
1712 nopout_hdr = (struct iscsi_nopout *)task->hdr;
1713 ep = qedi_conn->ep;
1714
1715 tid = qedi_get_task_idx(qedi);
1716 if (tid == -1)
1717 return -ENOMEM;
1718
1719 fw_task_ctx =
1720 (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
1721 tid);
1722 memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
1723
1724 qedi_cmd->task_id = tid;
1725
1726 memset(&task_params, 0, sizeof(task_params));
1727 memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
1728 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
1729 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
1730
1731 /* Update header info */
1732 nop_out_pdu_header.opcode = nopout_hdr->opcode;
1733 SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
1734 SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
1735
1736 memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
1737 nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
1738 nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
1739 nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
1740 nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
1741
1742 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
1743
1744 if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
1745 nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
1746 nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
1747 } else {
1748 nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
1749 nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;
1750
1751 spin_lock(&qedi_conn->list_lock);
1752 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
1753 qedi_cmd->io_cmd_in_list = true;
1754 qedi_conn->active_cmd_count++;
1755 spin_unlock(&qedi_conn->list_lock);
1756 }
1757
1758 /* Fill tx AHS and rx buffer */
1759 if (data_len) {
1760 tx_sgl_task_params.sgl =
1761 (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
1762 tx_sgl_task_params.sgl_phys_addr.lo =
1763 (u32)(qedi_conn->gen_pdu.req_dma_addr);
1764 tx_sgl_task_params.sgl_phys_addr.hi =
1765 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
1766 tx_sgl_task_params.total_buffer_size = data_len;
1767 tx_sgl_task_params.num_sges = 1;
1768
1769 rx_sgl_task_params.sgl =
1770 (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
1771 rx_sgl_task_params.sgl_phys_addr.lo =
1772 (u32)(qedi_conn->gen_pdu.resp_dma_addr);
1773 rx_sgl_task_params.sgl_phys_addr.hi =
1774 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
1775 rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
1776 rx_sgl_task_params.num_sges = 1;
1777 }
1778
1779 /* Fill fw input params */
1780 task_params.context = fw_task_ctx;
1781 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
1782 task_params.itid = tid;
1783 task_params.cq_rss_number = 0;
1784 task_params.tx_io_size = data_len;
1785 task_params.rx_io_size = resp_sge->sge_len;
1786
1787 sq_idx = qedi_get_wqe_idx(qedi_conn);
1788 task_params.sqe = &ep->sq[sq_idx];
1789
1790 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1791 rval = init_initiator_nop_out_task(&task_params,
1792 &nop_out_pdu_header,
1793 &tx_sgl_task_params,
1794 &rx_sgl_task_params);
1795 if (rval)
1796 return -1;
1797
1798 qedi_ring_doorbell(qedi_conn);
1799 return 0;
1800 }
1801
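/*
 * qedi_split_bd - split one DMA-mapped scatterlist element into multiple
 * SGEs starting at @bd_index; returns the number of SGEs written.
 *
 * The first fragment runs up to the next QEDI_PAGE_SIZE boundary when
 * @addr is unaligned; later fragments are capped at QEDI_BD_SPLIT_SZ.
 * For example, assuming a 4K page size and a 32K split size, a 40K
 * element starting 1K into a page would be emitted as 3K + 32K + 5K
 * fragments.
 */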
1802 static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
1803 int bd_index)
1804 {
1805 struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
1806 int frag_size, sg_frags;
1807
1808 sg_frags = 0;
1809
1810 while (sg_len) {
1811 if (addr % QEDI_PAGE_SIZE)
1812 frag_size =
1813 (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE));
1814 else
1815 frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 :
1816 (sg_len % QEDI_BD_SPLIT_SZ);
1817
1818 if (frag_size == 0)
1819 frag_size = QEDI_BD_SPLIT_SZ;
1820
1821 bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff);
1822 bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32);
1823 bd[bd_index + sg_frags].sge_len = (u16)frag_size;
1824 QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO,
1825 "split sge %d: addr=%llx, len=%x",
1826 (bd_index + sg_frags), addr, frag_size);
1827
1828 addr += (u64)frag_size;
1829 sg_frags++;
1830 sg_len -= frag_size;
1831 }
1832 return sg_frags;
1833 }
1834
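/*
 * qedi_map_scsi_sg - DMA-map the scsi_cmnd scatterlist into cmd->io_tbl.
 *
 * A single small element is posted as one cached SGE; otherwise every
 * element is copied into the SGE table (splitting any element larger than
 * QEDI_BD_SPLIT_SZ), and elements that are not page-aligned at their
 * interior boundaries force the slow-path SGL handling. Returns the number
 * of SGEs produced.
 */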
1835 static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
1836 {
1837 struct scsi_cmnd *sc = cmd->scsi_cmd;
1838 struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
1839 struct scatterlist *sg;
1840 int byte_count = 0;
1841 int bd_count = 0;
1842 int sg_count;
1843 int sg_len;
1844 int sg_frags;
1845 u64 addr, end_addr;
1846 int i;
1847
1848 WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD);
1849
1850 sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc),
1851 scsi_sg_count(sc), sc->sc_data_direction);
1852
1853 /*
1854 	 * Use the cached-SGL fast path for a single SGE
1855 	 * with a length of less than 64K.
1856 */
1857 sg = scsi_sglist(sc);
1858 if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) {
1859 sg_len = sg_dma_len(sg);
1860 addr = (u64)sg_dma_address(sg);
1861
1862 bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
1863 bd[bd_count].sge_addr.hi = (addr >> 32);
1864 bd[bd_count].sge_len = (u16)sg_len;
1865
1866 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
1867 "single-cached-sgl: bd_count:%d addr=%llx, len=%x",
1868 sg_count, addr, sg_len);
1869
1870 return ++bd_count;
1871 }
1872
1873 scsi_for_each_sg(sc, sg, sg_count, i) {
1874 sg_len = sg_dma_len(sg);
1875 addr = (u64)sg_dma_address(sg);
1876 end_addr = (addr + sg_len);
1877
1878 /*
1879 * first sg elem in the 'list',
1880 * check if end addr is page-aligned.
1881 */
1882 if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE))
1883 cmd->use_slowpath = true;
1884
1885 /*
1886 * last sg elem in the 'list',
1887 * check if start addr is page-aligned.
1888 */
1889 else if ((i == (sg_count - 1)) &&
1890 (sg_count > 1) && (addr % QEDI_PAGE_SIZE))
1891 cmd->use_slowpath = true;
1892
1893 /*
1894 * middle sg elements in list,
1895 		 * check if start and end addr are page-aligned
1896 */
1897 else if ((i != 0) && (i != (sg_count - 1)) &&
1898 ((addr % QEDI_PAGE_SIZE) ||
1899 (end_addr % QEDI_PAGE_SIZE)))
1900 cmd->use_slowpath = true;
1901
1902 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x",
1903 i, sg_len);
1904
1905 if (sg_len > QEDI_BD_SPLIT_SZ) {
1906 sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count);
1907 } else {
1908 sg_frags = 1;
1909 bd[bd_count].sge_addr.lo = addr & 0xffffffff;
1910 bd[bd_count].sge_addr.hi = addr >> 32;
1911 bd[bd_count].sge_len = sg_len;
1912 }
1913 byte_count += sg_len;
1914 bd_count += sg_frags;
1915 }
1916
1917 if (byte_count != scsi_bufflen(sc))
1918 QEDI_ERR(&qedi->dbg_ctx,
1919 "byte_count = %d != scsi_bufflen = %d\n", byte_count,
1920 scsi_bufflen(sc));
1921 else
1922 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n",
1923 byte_count);
1924
1925 WARN_ON(byte_count != scsi_bufflen(sc));
1926
1927 return bd_count;
1928 }
1929
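/*
 * qedi_iscsi_map_sg_list - fill cmd->io_tbl for a SCSI command; commands
 * without a scatterlist get a single zeroed SGE and an sge_valid count of 0.
 */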
1930 static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
1931 {
1932 int bd_count;
1933 struct scsi_cmnd *sc = cmd->scsi_cmd;
1934
1935 if (scsi_sg_count(sc)) {
1936 bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd);
1937 if (bd_count == 0)
1938 return;
1939 } else {
1940 struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
1941
1942 bd[0].sge_addr.lo = 0;
1943 bd[0].sge_addr.hi = 0;
1944 bd[0].sge_len = 0;
1945 bd_count = 0;
1946 }
1947 cmd->io_tbl.sge_valid = bd_count;
1948 }
1949
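/*
 * qedi_cpy_scsi_cdb - copy the SCSI CDB into the task context as
 * big-endian 32-bit words.
 *
 * The tail handling covers at most two leftover bytes, which is sufficient
 * for the standard 6/10/12/16-byte CDB lengths.
 */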
1950 static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp)
1951 {
1952 u32 dword;
1953 int lpcnt;
1954 u8 *srcp;
1955
1956 lpcnt = sc->cmd_len / sizeof(dword);
1957 srcp = (u8 *)sc->cmnd;
1958 while (lpcnt--) {
1959 memcpy(&dword, (const void *)srcp, 4);
1960 *dstp = cpu_to_be32(dword);
1961 srcp += 4;
1962 dstp++;
1963 }
1964 if (sc->cmd_len & 0x3) {
1965 dword = (u32)srcp[0] | ((u32)srcp[1] << 8);
1966 *dstp = cpu_to_be32(dword);
1967 }
1968 }
1969
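/*
 * qedi_trace_io - log one entry in the driver's circular I/O trace buffer.
 *
 * Records the LUN, opcode, LBA bytes, buffer length and the CPUs that
 * submitted, serviced and completed the I/O, then resets the per-I/O
 * SGE-mode flags.
 */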
1970 void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
1971 u16 tid, int8_t direction)
1972 {
1973 struct qedi_io_log *io_log;
1974 struct iscsi_conn *conn = task->conn;
1975 struct qedi_conn *qedi_conn = conn->dd_data;
1976 struct scsi_cmnd *sc_cmd = task->sc;
1977 unsigned long flags;
1978
1979 spin_lock_irqsave(&qedi->io_trace_lock, flags);
1980
1981 io_log = &qedi->io_trace_buf[qedi->io_trace_idx];
1982 io_log->direction = direction;
1983 io_log->task_id = tid;
1984 io_log->cid = qedi_conn->iscsi_conn_id;
1985 io_log->lun = sc_cmd->device->lun;
1986 io_log->op = sc_cmd->cmnd[0];
1987 io_log->lba[0] = sc_cmd->cmnd[2];
1988 io_log->lba[1] = sc_cmd->cmnd[3];
1989 io_log->lba[2] = sc_cmd->cmnd[4];
1990 io_log->lba[3] = sc_cmd->cmnd[5];
1991 io_log->bufflen = scsi_bufflen(sc_cmd);
1992 io_log->sg_count = scsi_sg_count(sc_cmd);
1993 io_log->fast_sgs = qedi->fast_sgls;
1994 io_log->cached_sgs = qedi->cached_sgls;
1995 io_log->slow_sgs = qedi->slow_sgls;
1996 io_log->cached_sge = qedi->use_cached_sge;
1997 io_log->slow_sge = qedi->use_slow_sge;
1998 io_log->fast_sge = qedi->use_fast_sge;
1999 io_log->result = sc_cmd->result;
2000 io_log->jiffies = jiffies;
2001 io_log->blk_req_cpu = smp_processor_id();
2002
2003 if (direction == QEDI_IO_TRACE_REQ) {
2004 /* For requests we only care about the submission CPU */
2005 io_log->req_cpu = smp_processor_id() % qedi->num_queues;
2006 io_log->intr_cpu = 0;
2007 io_log->blk_rsp_cpu = 0;
2008 } else if (direction == QEDI_IO_TRACE_RSP) {
2009 io_log->req_cpu = smp_processor_id() % qedi->num_queues;
2010 io_log->intr_cpu = qedi->intr_cpu;
2011 io_log->blk_rsp_cpu = smp_processor_id();
2012 }
2013
2014 qedi->io_trace_idx++;
2015 if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE)
2016 qedi->io_trace_idx = 0;
2017
2018 qedi->use_cached_sge = false;
2019 qedi->use_slow_sge = false;
2020 qedi->use_fast_sge = false;
2021
2022 spin_unlock_irqrestore(&qedi->io_trace_lock, flags);
2023 }
2024
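/*
 * qedi_iscsi_send_ioreq - build and post a SCSI Command PDU.
 *
 * Maps the data buffer into the command's SGE table, reserves a firmware
 * task index, fills the command PDU header plus connection and command
 * parameters, attaches the TX or RX SGL according to the data direction,
 * adds the command to the active list and rings the SQ doorbell.
 */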
2025 int qedi_iscsi_send_ioreq(struct iscsi_task *task)
2026 {
2027 struct iscsi_conn *conn = task->conn;
2028 struct iscsi_session *session = conn->session;
2029 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
2030 struct qedi_ctx *qedi = iscsi_host_priv(shost);
2031 struct qedi_conn *qedi_conn = conn->dd_data;
2032 struct qedi_cmd *cmd = task->dd_data;
2033 struct scsi_cmnd *sc = task->sc;
2034 struct iscsi_cmd_hdr cmd_pdu_header;
2035 struct scsi_sgl_task_params tx_sgl_task_params;
2036 struct scsi_sgl_task_params rx_sgl_task_params;
2037 struct scsi_sgl_task_params *prx_sgl = NULL;
2038 struct scsi_sgl_task_params *ptx_sgl = NULL;
2039 struct iscsi_task_params task_params;
2040 struct iscsi_conn_params conn_params;
2041 struct scsi_initiator_cmd_params cmd_params;
2042 struct e4_iscsi_task_context *fw_task_ctx;
2043 struct iscsi_cls_conn *cls_conn;
2044 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
2045 enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
2046 struct qedi_endpoint *ep;
2047 u32 scsi_lun[2];
2048 s16 tid = 0;
2049 u16 sq_idx = 0;
2050 u16 cq_idx;
2051 int rval = 0;
2052
2053 ep = qedi_conn->ep;
2054 cls_conn = qedi_conn->cls_conn;
2055 conn = cls_conn->dd_data;
2056
2057 qedi_iscsi_map_sg_list(cmd);
2058 int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
2059
2060 tid = qedi_get_task_idx(qedi);
2061 if (tid == -1)
2062 return -ENOMEM;
2063
2064 fw_task_ctx =
2065 (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
2066 tid);
2067 memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
2068
2069 cmd->task_id = tid;
2070
2071 memset(&task_params, 0, sizeof(task_params));
2072 memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
2073 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
2074 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
2075 memset(&conn_params, 0, sizeof(conn_params));
2076 memset(&cmd_params, 0, sizeof(cmd_params));
2077
2078 cq_idx = smp_processor_id() % qedi->num_queues;
2079 /* Update header info */
2080 SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
2081 ISCSI_ATTR_SIMPLE);
2082 if (hdr->cdb[0] != TEST_UNIT_READY) {
2083 if (sc->sc_data_direction == DMA_TO_DEVICE) {
2084 SET_FIELD(cmd_pdu_header.flags_attr,
2085 ISCSI_CMD_HDR_WRITE, 1);
2086 task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
2087 } else {
2088 SET_FIELD(cmd_pdu_header.flags_attr,
2089 ISCSI_CMD_HDR_READ, 1);
2090 task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
2091 }
2092 }
2093
2094 cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
2095 cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
2096
2097 qedi_update_itt_map(qedi, tid, task->itt, cmd);
2098 cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
2099 cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
2100 cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
2101 cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
2102 cmd_pdu_header.hdr_first_byte = hdr->opcode;
2103 qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
2104
2105 /* Fill tx AHS and rx buffer */
2106 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
2107 tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
2108 tx_sgl_task_params.sgl_phys_addr.lo =
2109 (u32)(cmd->io_tbl.sge_tbl_dma);
2110 tx_sgl_task_params.sgl_phys_addr.hi =
2111 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2112 tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
2113 tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
2114 if (cmd->use_slowpath)
2115 tx_sgl_task_params.small_mid_sge = true;
2116 } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
2117 rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
2118 rx_sgl_task_params.sgl_phys_addr.lo =
2119 (u32)(cmd->io_tbl.sge_tbl_dma);
2120 rx_sgl_task_params.sgl_phys_addr.hi =
2121 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
2122 rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
2123 rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
2124 }
2125
2126 /* Add conn param */
2127 conn_params.first_burst_length = conn->session->first_burst;
2128 conn_params.max_send_pdu_length = conn->max_xmit_dlength;
2129 conn_params.max_burst_length = conn->session->max_burst;
2130 if (conn->session->initial_r2t_en)
2131 conn_params.initial_r2t = true;
2132 if (conn->session->imm_data_en)
2133 conn_params.immediate_data = true;
2134
2135 /* Add cmd params */
2136 cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
2137 cmd_params.sense_data_buffer_phys_addr.hi =
2138 (u32)((u64)cmd->sense_buffer_dma >> 32);
2139 /* Fill fw input params */
2140 task_params.context = fw_task_ctx;
2141 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
2142 task_params.itid = tid;
2143 task_params.cq_rss_number = cq_idx;
2144 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
2145 task_params.tx_io_size = scsi_bufflen(sc);
2146 else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
2147 task_params.rx_io_size = scsi_bufflen(sc);
2148
2149 sq_idx = qedi_get_wqe_idx(qedi_conn);
2150 task_params.sqe = &ep->sq[sq_idx];
2151
2152 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
2153 "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n",
2154 (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
2155 "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
2156 "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
2157 		  scsi_bufflen(sc), (u16)cmd->io_tbl.sge_valid,
2158 (u32)(cmd->io_tbl.sge_tbl_dma),
2159 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
2160
2161 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
2162
2163 if (task_params.tx_io_size != 0)
2164 ptx_sgl = &tx_sgl_task_params;
2165 if (task_params.rx_io_size != 0)
2166 prx_sgl = &rx_sgl_task_params;
2167
2168 rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
2169 &cmd_params, &cmd_pdu_header,
2170 ptx_sgl, prx_sgl,
2171 NULL);
2172 if (rval)
2173 return -1;
2174
2175 spin_lock(&qedi_conn->list_lock);
2176 list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
2177 cmd->io_cmd_in_list = true;
2178 qedi_conn->active_cmd_count++;
2179 spin_unlock(&qedi_conn->list_lock);
2180
2181 qedi_ring_doorbell(qedi_conn);
2182 return 0;
2183 }
2184
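/*
 * qedi_iscsi_cleanup_task - post a firmware task-cleanup WQE for @task.
 *
 * Only the SQ entry is built and the doorbell rung here; the
 * @mark_cmd_node_deleted argument is not consulted in this path.
 */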
2185 int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
2186 {
2187 struct iscsi_task_params task_params;
2188 struct qedi_endpoint *ep;
2189 struct iscsi_conn *conn = task->conn;
2190 struct qedi_conn *qedi_conn = conn->dd_data;
2191 struct qedi_cmd *cmd = task->dd_data;
2192 u16 sq_idx = 0;
2193 int rval = 0;
2194
2195 QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
2196 		  "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0x%x cid=0x%x\n",
2197 cmd->task_id, get_itt(task->itt), task->state,
2198 cmd->state, qedi_conn->iscsi_conn_id);
2199
2200 memset(&task_params, 0, sizeof(task_params));
2201 ep = qedi_conn->ep;
2202
2203 sq_idx = qedi_get_wqe_idx(qedi_conn);
2204
2205 task_params.sqe = &ep->sq[sq_idx];
2206 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
2207 task_params.itid = cmd->task_id;
2208
2209 rval = init_cleanup_task(&task_params);
2210 if (rval)
2211 return rval;
2212
2213 qedi_ring_doorbell(qedi_conn);
2214 return 0;
2215 }
2216