1 /*
2 * QLogic FCoE Offload Driver
3 * Copyright (c) 2016-2017 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9 #include "qedf.h"
10
11 /* It's assumed that the lock is held when calling this function. */
12 static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
13 void *data, uint32_t data_len,
14 void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
15 struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
16 {
17 struct qedf_ctx *qedf = fcport->qedf;
18 struct fc_lport *lport = qedf->lport;
19 struct qedf_ioreq *els_req;
20 struct qedf_mp_req *mp_req;
21 struct fc_frame_header *fc_hdr;
22 struct fcoe_task_context *task;
23 int rc = 0;
24 uint32_t did, sid;
25 uint16_t xid;
26 struct fcoe_wqe *sqe;
27 unsigned long flags;
28 u16 sqe_idx;
29
30 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
31
32 rc = fc_remote_port_chkready(fcport->rport);
33 if (rc) {
34 QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
35 rc = -EAGAIN;
36 goto els_err;
37 }
38 if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
39 QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
40 op);
41 rc = -EAGAIN;
42 goto els_err;
43 }
44
45 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
46 QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
47 rc = -EINVAL;
48 goto els_err;
49 }
50
51 els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
52 if (!els_req) {
53 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
54 "Failed to alloc ELS request 0x%x\n", op);
55 rc = -ENOMEM;
56 goto els_err;
57 }
58
59 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
60 "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
61 els_req->xid);
62 els_req->sc_cmd = NULL;
63 els_req->cmd_type = QEDF_ELS;
64 els_req->fcport = fcport;
65 els_req->cb_func = cb_func;
66 cb_arg->io_req = els_req;
67 cb_arg->op = op;
68 els_req->cb_arg = cb_arg;
69 els_req->data_xfer_len = data_len;
70
71 /* Record which cpu this request is associated with */
72 els_req->cpu = smp_processor_id();
73
74 mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
75 rc = qedf_init_mp_req(els_req);
76 if (rc) {
77 QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
78 kref_put(&els_req->refcount, qedf_release_cmd);
79 goto els_err;
80 } else {
81 rc = 0;
82 }
83
84 /* Fill ELS Payload */
85 if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
86 memcpy(mp_req->req_buf, data, data_len);
87 } else {
88 QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
89 els_req->cb_func = NULL;
90 els_req->cb_arg = NULL;
91 kref_put(&els_req->refcount, qedf_release_cmd);
92 rc = -EINVAL;
93 }
94
95 if (rc)
96 goto els_err;
97
98 /* Fill FC header */
99 fc_hdr = &(mp_req->req_fc_hdr);
100
101 did = fcport->rdata->ids.port_id;
102 sid = fcport->sid;
103
104 __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
105 FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
106 FC_FC_SEQ_INIT, 0);
107
108 /* Obtain exchange id */
109 xid = els_req->xid;
110
111 spin_lock_irqsave(&fcport->rport_lock, flags);
112
113 sqe_idx = qedf_get_sqe_idx(fcport);
114 sqe = &fcport->sq[sqe_idx];
115 memset(sqe, 0, sizeof(struct fcoe_wqe));
116
117 /* Initialize task context for this IO request */
118 task = qedf_get_task_mem(&qedf->tasks, xid);
119 qedf_init_mp_task(els_req, task, sqe);
120
121 /* Put timer on the ELS request */
122 if (timer_msec)
123 qedf_cmd_timer_set(qedf, els_req, timer_msec);
124
125 /* Ring doorbell */
126 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
127 "req\n");
128 qedf_ring_doorbell(fcport);
129 spin_unlock_irqrestore(&fcport->rport_lock, flags);
130 els_err:
131 return rc;
132 }
133
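/*
 * Process the firmware CQE for a completed ELS request: cancel the ELS
 * timer, record the response length reported in the midpath CQE, invoke the
 * per-command callback and drop the request's reference.
 */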
134 void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
135 struct qedf_ioreq *els_req)
136 {
137 struct fcoe_task_context *task_ctx;
138 struct scsi_cmnd *sc_cmd;
139 uint16_t xid;
140 struct fcoe_cqe_midpath_info *mp_info;
141
142 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
143 " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
144
145 /* Kill the ELS timer */
146 cancel_delayed_work(&els_req->timeout_work);
147
148 xid = els_req->xid;
149 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
150 sc_cmd = els_req->sc_cmd;
151
152 /* Get ELS response length from CQE */
153 mp_info = &cqe->cqe_info.midpath_info;
154 els_req->mp_req.resp_len = mp_info->data_placement_size;
155
156 /* Parse ELS response */
157 if ((els_req->cb_func) && (els_req->cb_arg)) {
158 els_req->cb_func(els_req->cb_arg);
159 els_req->cb_arg = NULL;
160 }
161
162 kref_put(&els_req->refcount, qedf_release_cmd);
163 }
164
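/*
 * Completion callback for an RRQ: cancel any outstanding timer on the
 * aborted I/O request and drop the reference that was keeping it from
 * returning to the command pool.
 */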
165 static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
166 {
167 struct qedf_ioreq *orig_io_req;
168 struct qedf_ioreq *rrq_req;
169 struct qedf_ctx *qedf;
170 int refcount;
171
172 rrq_req = cb_arg->io_req;
173 qedf = rrq_req->fcport->qedf;
174
175 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");
176
177 orig_io_req = cb_arg->aborted_io_req;
178
179 if (!orig_io_req)
180 goto out_free;
181
182 if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
183 rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
184 cancel_delayed_work_sync(&orig_io_req->timeout_work);
185
186 refcount = kref_read(&orig_io_req->refcount);
187 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
188 " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
189 orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);
190
191 /* This should return the aborted io_req to the command pool */
192 if (orig_io_req)
193 kref_put(&orig_io_req->refcount, qedf_release_cmd);
194
195 out_free:
196 kfree(cb_arg);
197 }
198
199 /* Assumes kref is already held by caller */
200 int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
201 {
202
203 struct fc_els_rrq rrq;
204 struct qedf_rport *fcport;
205 struct fc_lport *lport;
206 struct qedf_els_cb_arg *cb_arg = NULL;
207 struct qedf_ctx *qedf;
208 uint32_t sid;
209 uint32_t r_a_tov;
210 int rc;
211
212 if (!aborted_io_req) {
213 QEDF_ERR(NULL, "aborted_io_req is NULL.\n");
214 return -EINVAL;
215 }
216
217 fcport = aborted_io_req->fcport;
218
219 /* Check that fcport is still offloaded */
220 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
221 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
222 return -EINVAL;
223 }
224
225 if (!fcport->qedf) {
226 QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
227 return -EINVAL;
228 }
229
230 qedf = fcport->qedf;
231 lport = qedf->lport;
232 sid = fcport->sid;
233 r_a_tov = lport->r_a_tov;
234
235 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
236 "io = %p, orig_xid = 0x%x\n", aborted_io_req,
237 aborted_io_req->xid);
238 memset(&rrq, 0, sizeof(rrq));
239
240 cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
241 if (!cb_arg) {
242 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
243 "RRQ\n");
244 rc = -ENOMEM;
245 goto rrq_err;
246 }
247
248 cb_arg->aborted_io_req = aborted_io_req;
249
250 rrq.rrq_cmd = ELS_RRQ;
251 hton24(rrq.rrq_s_id, sid);
252 rrq.rrq_ox_id = htons(aborted_io_req->xid);
253 rrq.rrq_rx_id =
254 htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);
255
256 rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
257 qedf_rrq_compl, cb_arg, r_a_tov);
258
259 rrq_err:
260 if (rc) {
261 QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
262 "req 0x%x\n", aborted_io_req->xid);
263 kfree(cb_arg);
264 kref_put(&aborted_io_req->refcount, qedf_release_cmd);
265 }
266 return rc;
267 }
268
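/*
 * Hand a firmware-delivered ELS response back to libfc as if it had arrived
 * on the L2 path: restore the OX_ID libfc used, rewrite the header as an ELS
 * reply addressed to the local port, recompute the CRC and pass the frame to
 * fc_exch_recv().
 */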
269 static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
270 struct fc_frame *fp,
271 u16 l2_oxid)
272 {
273 struct fc_lport *lport = fcport->qedf->lport;
274 struct fc_frame_header *fh;
275 u32 crc;
276
277 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
278
279 /* Set the OXID we return to what libfc used */
280 if (l2_oxid != FC_XID_UNKNOWN)
281 fh->fh_ox_id = htons(l2_oxid);
282
283 /* Setup header fields */
284 fh->fh_r_ctl = FC_RCTL_ELS_REP;
285 fh->fh_type = FC_TYPE_ELS;
286 /* Exchange responder, last sequence, end sequence */
287 fh->fh_f_ctl[0] = 0x98;
288 hton24(fh->fh_d_id, lport->port_id);
289 hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
290 fh->fh_rx_id = 0xffff;
291
292 /* Set frame attributes */
293 crc = fcoe_fc_crc(fp);
294 fc_frame_init(fp);
295 fr_dev(fp) = lport;
296 fr_sof(fp) = FC_SOF_I3;
297 fr_eof(fp) = FC_EOF_T;
298 fr_crc(fp) = cpu_to_le32(~crc);
299
300 /* Send completed request to libfc */
301 fc_exch_recv(lport, fp);
302 }
303
304 /*
305 * In instances where an ELS command times out we may need to restart the
306 * rport by logging out and then logging back in.
307 */
308 void qedf_restart_rport(struct qedf_rport *fcport)
309 {
310 struct fc_lport *lport;
311 struct fc_rport_priv *rdata;
312 u32 port_id;
313
314 if (!fcport)
315 return;
316
317 rdata = fcport->rdata;
318 if (rdata) {
319 lport = fcport->qedf->lport;
320 port_id = rdata->ids.port_id;
321 QEDF_ERR(&(fcport->qedf->dbg_ctx),
322 "LOGO port_id=%x.\n", port_id);
323 fc_rport_logoff(rdata);
324 /* Recreate the rport and log back in */
325 rdata = fc_rport_create(lport, port_id);
326 if (rdata)
327 fc_rport_login(rdata);
328 }
329 }
330
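/*
 * Completion callback for ELS commands issued on behalf of libfc (e.g.
 * ADISC): copy the firmware midpath response into a newly allocated fc_frame
 * and complete it back to libfc under the original OX_ID.
 */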
331 static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
332 {
333 struct qedf_ioreq *els_req;
334 struct qedf_rport *fcport;
335 struct qedf_mp_req *mp_req;
336 struct fc_frame *fp;
337 struct fc_frame_header *fh, *mp_fc_hdr;
338 void *resp_buf, *fc_payload;
339 u32 resp_len;
340 u16 l2_oxid;
341
342 l2_oxid = cb_arg->l2_oxid;
343 els_req = cb_arg->io_req;
344
345 if (!els_req) {
346 QEDF_ERR(NULL, "els_req is NULL.\n");
347 goto free_arg;
348 }
349
350 /*
351 * If we are flushing the command just free the cb_arg as none of the
352 * response data will be valid.
353 */
354 if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
355 goto free_arg;
356
357 fcport = els_req->fcport;
358 mp_req = &(els_req->mp_req);
359 mp_fc_hdr = &(mp_req->resp_fc_hdr);
360 resp_len = mp_req->resp_len;
361 resp_buf = mp_req->resp_buf;
362
363 /*
364 * If a middle path ELS command times out, don't try to return
365 * the command but rather do any internal cleanup and then let libfc
366 * time out the command and clean up its internal resources.
367 */
368 if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
369 /*
370 * If the ADISC times out, libfc will time out the exchange and then
371 * try to send a PLOGI, which will also time out since the session is
372 * still offloaded. Force libfc to log out the session, which will
373 * unoffload the connection and allow the PLOGI response to
374 * flow over the LL2 path.
375 */
376 if (cb_arg->op == ELS_ADISC)
377 qedf_restart_rport(fcport);
378 return;
379 }
380
381 if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
382 QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
383 "beyond page size.\n");
384 goto free_arg;
385 }
386
387 fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
388 if (!fp) {
389 QEDF_ERR(&(fcport->qedf->dbg_ctx),
390 "fc_frame_alloc failure.\n");
391 return;
392 }
393
394 /* Copy frame header from firmware into fp */
395 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
396 memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
397
398 /* Copy payload from firmware into fp */
399 fc_payload = fc_frame_payload_get(fp, resp_len);
400 memcpy(fc_payload, resp_buf, resp_len);
401
402 QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
403 "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
404 qedf_process_l2_frame_compl(fcport, fp, l2_oxid);
405
406 free_arg:
407 kfree(cb_arg);
408 }
409
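/*
 * Send an ADISC built by libfc (passed in as an fc_frame) over the offloaded
 * midpath; the response is returned to libfc by qedf_l2_els_compl().
 */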
410 int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
411 {
412 struct fc_els_adisc *adisc;
413 struct fc_frame_header *fh;
414 struct fc_lport *lport = fcport->qedf->lport;
415 struct qedf_els_cb_arg *cb_arg = NULL;
416 struct qedf_ctx *qedf;
417 uint32_t r_a_tov = lport->r_a_tov;
418 int rc;
419
420 qedf = fcport->qedf;
421 fh = fc_frame_header_get(fp);
422
423 cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
424 if (!cb_arg) {
425 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
426 "ADISC\n");
427 rc = -ENOMEM;
428 goto adisc_err;
429 }
430 cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
431
432 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
433 "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);
434
435 adisc = fc_frame_payload_get(fp, sizeof(*adisc));
436
437 rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
438 qedf_l2_els_compl, cb_arg, r_a_tov);
439
440 adisc_err:
441 if (rc) {
442 QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
443 kfree(cb_arg);
444 }
445 return rc;
446 }
447
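/*
 * Completion callback for an SRR: on LS_ACC nothing further is needed, on
 * LS_RJT the original I/O is aborted; in either case drop the reference that
 * was taken on the original request when the SRR was sent.
 */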
448 static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
449 {
450 struct qedf_ioreq *orig_io_req;
451 struct qedf_ioreq *srr_req;
452 struct qedf_mp_req *mp_req;
453 struct fc_frame_header *mp_fc_hdr, *fh;
454 struct fc_frame *fp;
455 void *resp_buf, *fc_payload;
456 u32 resp_len;
457 struct fc_lport *lport;
458 struct qedf_ctx *qedf;
459 int refcount;
460 u8 opcode;
461
462 srr_req = cb_arg->io_req;
463 qedf = srr_req->fcport->qedf;
464 lport = qedf->lport;
465
466 orig_io_req = cb_arg->aborted_io_req;
467
468 if (!orig_io_req)
469 goto out_free;
470
471 clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
472
473 if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
474 srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
475 cancel_delayed_work_sync(&orig_io_req->timeout_work);
476
477 refcount = kref_read(&orig_io_req->refcount);
478 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
479 " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
480 orig_io_req, orig_io_req->xid, srr_req->xid, refcount);
481
482 /* If an SRR times out, simply free resources */
483 if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
484 goto out_put;
485
486 /* Normalize response data into struct fc_frame */
487 mp_req = &(srr_req->mp_req);
488 mp_fc_hdr = &(mp_req->resp_fc_hdr);
489 resp_len = mp_req->resp_len;
490 resp_buf = mp_req->resp_buf;
491
492 fp = fc_frame_alloc(lport, resp_len);
493 if (!fp) {
494 QEDF_ERR(&(qedf->dbg_ctx),
495 "fc_frame_alloc failure.\n");
496 goto out_put;
497 }
498
499 /* Copy frame header from firmware into fp */
500 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
501 memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
502
503 /* Copy payload from firmware into fp */
504 fc_payload = fc_frame_payload_get(fp, resp_len);
505 memcpy(fc_payload, resp_buf, resp_len);
506
507 opcode = fc_frame_payload_op(fp);
508 switch (opcode) {
509 case ELS_LS_ACC:
510 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
511 "SRR success.\n");
512 break;
513 case ELS_LS_RJT:
514 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
515 "SRR rejected.\n");
516 qedf_initiate_abts(orig_io_req, true);
517 break;
518 }
519
520 fc_frame_free(fp);
521 out_put:
522 /* Put reference for original command since SRR completed */
523 kref_put(&orig_io_req->refcount, qedf_release_cmd);
524 out_free:
525 kfree(cb_arg);
526 }
527
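/*
 * Build and send an FCP SRR (Sequence Retransmission Request) asking the
 * target to retransmit the original exchange starting at 'offset' with the
 * given R_CTL. A reference is held on orig_io_req until qedf_srr_compl()
 * runs; if the SRR cannot be queued the original I/O is aborted instead.
 */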
528 static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
529 {
530 struct fcp_srr srr;
531 struct qedf_ctx *qedf;
532 struct qedf_rport *fcport;
533 struct fc_lport *lport;
534 struct qedf_els_cb_arg *cb_arg = NULL;
535 u32 sid, r_a_tov;
536 int rc;
537
538 if (!orig_io_req) {
539 QEDF_ERR(NULL, "orig_io_req is NULL.\n");
540 return -EINVAL;
541 }
542
543 fcport = orig_io_req->fcport;
544
545 /* Check that fcport is still offloaded */
546 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
547 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
548 return -EINVAL;
549 }
550
551 if (!fcport->qedf) {
552 QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
553 return -EINVAL;
554 }
555
556 /* Take reference until SRR command completion */
557 kref_get(&orig_io_req->refcount);
558
559 qedf = fcport->qedf;
560 lport = qedf->lport;
561 sid = fcport->sid;
562 r_a_tov = lport->r_a_tov;
563
564 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
565 "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
566 memset(&srr, 0, sizeof(srr));
567
568 cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
569 if (!cb_arg) {
570 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
571 "SRR\n");
572 rc = -ENOMEM;
573 goto srr_err;
574 }
575
576 cb_arg->aborted_io_req = orig_io_req;
577
578 srr.srr_op = ELS_SRR;
579 srr.srr_ox_id = htons(orig_io_req->xid);
580 srr.srr_rx_id = htons(orig_io_req->rx_id);
581 srr.srr_rel_off = htonl(offset);
582 srr.srr_r_ctl = r_ctl;
583
584 rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
585 qedf_srr_compl, cb_arg, r_a_tov);
586
587 srr_err:
588 if (rc) {
589 QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
590 "=0x%x\n", orig_io_req->xid);
591 kfree(cb_arg);
592 /* If we fail to queue SRR, send ABTS to orig_io */
593 qedf_initiate_abts(orig_io_req, true);
594 kref_put(&orig_io_req->refcount, qedf_release_cmd);
595 } else
596 /* Tell other threads that SRR is in progress */
597 set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
598
599 return rc;
600 }
601
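/*
 * Issue a firmware sequence-cleanup task for the original I/O before
 * attempting sequence recovery; qedf_process_seq_cleanup_compl() sends the
 * SRR once the cleanup completes.
 */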
602 static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
603 u32 offset, u8 r_ctl)
604 {
605 struct qedf_rport *fcport;
606 unsigned long flags;
607 struct qedf_els_cb_arg *cb_arg;
608 struct fcoe_wqe *sqe;
609 u16 sqe_idx;
610
611 fcport = orig_io_req->fcport;
612
613 QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
614 "Doing sequence cleanup for xid=0x%x offset=%u.\n",
615 orig_io_req->xid, offset);
616
617 cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
618 if (!cb_arg) {
619 QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
620 "for sequence cleanup\n");
621 return;
622 }
623
624 /* Get reference for cleanup request */
625 kref_get(&orig_io_req->refcount);
626
627 orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
628 cb_arg->offset = offset;
629 cb_arg->r_ctl = r_ctl;
630 orig_io_req->cb_arg = cb_arg;
631
632 qedf_cmd_timer_set(fcport->qedf, orig_io_req,
633 QEDF_CLEANUP_TIMEOUT * HZ);
634
635 spin_lock_irqsave(&fcport->rport_lock, flags);
636
637 sqe_idx = qedf_get_sqe_idx(fcport);
638 sqe = &fcport->sq[sqe_idx];
639 memset(sqe, 0, sizeof(struct fcoe_wqe));
640 orig_io_req->task_params->sqe = sqe;
641
642 init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
643 offset);
644 qedf_ring_doorbell(fcport);
645
646 spin_unlock_irqrestore(&fcport->rport_lock, flags);
647 }
648
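/*
 * Firmware completion of a sequence-cleanup task: cancel the cleanup timer
 * and follow up with the SRR that performs the actual recovery.
 */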
649 void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
650 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
651 {
652 int rc;
653 struct qedf_els_cb_arg *cb_arg;
654
655 cb_arg = io_req->cb_arg;
656
657 /* If we timed out just free resources */
658 if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
659 goto free;
660
661 /* Kill the timer we put on the request */
662 cancel_delayed_work_sync(&io_req->timeout_work);
663
664 rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
665 if (rc)
666 QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
667 "abort, xid=0x%x.\n", io_req->xid);
668 free:
669 kfree(cb_arg);
670 kref_put(&io_req->refcount, qedf_release_cmd);
671 }
672
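/*
 * Reissue a SCSI command on a new exchange when the REC response indicates
 * the target never received the original command (the "command lost" case).
 * The original request is aborted without completing the scsi_cmnd, which
 * now belongs to the new io_req.
 */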
673 static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
674 {
675 struct qedf_rport *fcport;
676 struct qedf_ioreq *new_io_req;
677 unsigned long flags;
678 bool rc = false;
679
680 fcport = orig_io_req->fcport;
681 if (!fcport) {
682 QEDF_ERR(NULL, "fcport is NULL.\n");
683 goto out;
684 }
685
686 if (!orig_io_req->sc_cmd) {
687 QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
688 "xid=0x%x.\n", orig_io_req->xid);
689 goto out;
690 }
691
692 new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
693 if (!new_io_req) {
694 QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
695 "io_req.\n");
696 goto out;
697 }
698
699 new_io_req->sc_cmd = orig_io_req->sc_cmd;
700
701 /*
702 * This keeps the sc_cmd struct from being returned to the tape
703 * driver and being requeued twice. We do need to put a reference
704 * for the original I/O request since we will not do a SCSI completion
705 * for it.
706 */
707 orig_io_req->sc_cmd = NULL;
708 kref_put(&orig_io_req->refcount, qedf_release_cmd);
709
710 spin_lock_irqsave(&fcport->rport_lock, flags);
711
712 /* kref for new command released in qedf_post_io_req on error */
713 if (qedf_post_io_req(fcport, new_io_req)) {
714 QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
715 /* Return SQE to pool */
716 atomic_inc(&fcport->free_sqes);
717 } else {
718 QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
719 "Reissued SCSI command from orig_xid=0x%x on "
720 "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
721 /*
722 * Abort the original I/O but do not return SCSI command as
723 * it has been reissued on another OX_ID.
724 */
725 spin_unlock_irqrestore(&fcport->rport_lock, flags);
726 qedf_initiate_abts(orig_io_req, false);
727 goto out;
728 }
729
730 spin_unlock_irqrestore(&fcport->rport_lock, flags);
731 out:
732 return rc;
733 }
734
735
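/*
 * Completion callback for a REC: an LS_RJT with an OX_ID/RX_ID explanation
 * means the command was lost and must be reissued, while an LS_ACC reports
 * (via reca_fc4value/reca_e_stat) how far the exchange progressed so the
 * I/O can be recovered with either an SRR or a sequence-cleanup task.
 */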
736 static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
737 {
738 struct qedf_ioreq *orig_io_req;
739 struct qedf_ioreq *rec_req;
740 struct qedf_mp_req *mp_req;
741 struct fc_frame_header *mp_fc_hdr, *fh;
742 struct fc_frame *fp;
743 void *resp_buf, *fc_payload;
744 u32 resp_len;
745 struct fc_lport *lport;
746 struct qedf_ctx *qedf;
747 int refcount;
748 enum fc_rctl r_ctl;
749 struct fc_els_ls_rjt *rjt;
750 struct fc_els_rec_acc *acc;
751 u8 opcode;
752 u32 offset, e_stat;
753 struct scsi_cmnd *sc_cmd;
754 bool srr_needed = false;
755
756 rec_req = cb_arg->io_req;
757 qedf = rec_req->fcport->qedf;
758 lport = qedf->lport;
759
760 orig_io_req = cb_arg->aborted_io_req;
761
762 if (!orig_io_req)
763 goto out_free;
764
765 if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
766 rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
767 cancel_delayed_work_sync(&orig_io_req->timeout_work);
768
769 refcount = kref_read(&orig_io_req->refcount);
770 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
771 " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
772 orig_io_req, orig_io_req->xid, rec_req->xid, refcount);
773
774 /* If a REC times out, free resources */
775 if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
776 goto out_put;
777
778 /* Normalize response data into struct fc_frame */
779 mp_req = &(rec_req->mp_req);
780 mp_fc_hdr = &(mp_req->resp_fc_hdr);
781 resp_len = mp_req->resp_len;
782 acc = resp_buf = mp_req->resp_buf;
783
784 fp = fc_frame_alloc(lport, resp_len);
785 if (!fp) {
786 QEDF_ERR(&(qedf->dbg_ctx),
787 "fc_frame_alloc failure.\n");
788 goto out_put;
789 }
790
791 /* Copy frame header from firmware into fp */
792 fh = (struct fc_frame_header *)fc_frame_header_get(fp);
793 memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
794
795 /* Copy payload from firmware into fp */
796 fc_payload = fc_frame_payload_get(fp, resp_len);
797 memcpy(fc_payload, resp_buf, resp_len);
798
799 opcode = fc_frame_payload_op(fp);
800 if (opcode == ELS_LS_RJT) {
801 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
802 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
803 "Received LS_RJT for REC: er_reason=0x%x, "
804 "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
805 /*
806 * The following response(s) mean that we need to reissue the
807 * request on another exchange. We need to do this without
808 * informing the upper layers lest it cause an application
809 * error.
810 */
811 if ((rjt->er_reason == ELS_RJT_LOGIC ||
812 rjt->er_reason == ELS_RJT_UNAB) &&
813 rjt->er_explan == ELS_EXPL_OXID_RXID) {
814 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
815 "Handle CMD LOST case.\n");
816 qedf_requeue_io_req(orig_io_req);
817 }
818 } else if (opcode == ELS_LS_ACC) {
819 offset = ntohl(acc->reca_fc4value);
820 e_stat = ntohl(acc->reca_e_stat);
821 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
822 "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
823 offset, e_stat);
824 if (e_stat & ESB_ST_SEQ_INIT) {
825 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
826 "Target has the seq init\n");
827 goto out_free_frame;
828 }
829 sc_cmd = orig_io_req->sc_cmd;
830 if (!sc_cmd) {
831 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
832 "sc_cmd is NULL for xid=0x%x.\n",
833 orig_io_req->xid);
834 goto out_free_frame;
835 }
836 /* SCSI write case */
837 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
838 if (offset == orig_io_req->data_xfer_len) {
839 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
840 "WRITE - response lost.\n");
841 r_ctl = FC_RCTL_DD_CMD_STATUS;
842 srr_needed = true;
843 offset = 0;
844 } else {
845 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
846 "WRITE - XFER_RDY/DATA lost.\n");
847 r_ctl = FC_RCTL_DD_DATA_DESC;
848 /* Use data from warning CQE instead of REC */
849 offset = orig_io_req->tx_buf_off;
850 }
851 /* SCSI read case */
852 } else {
853 if (orig_io_req->rx_buf_off ==
854 orig_io_req->data_xfer_len) {
855 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
856 "READ - response lost.\n");
857 srr_needed = true;
858 r_ctl = FC_RCTL_DD_CMD_STATUS;
859 offset = 0;
860 } else {
861 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
862 "READ - DATA lost.\n");
863 /*
864 * For read case we always set the offset to 0
865 * for sequence recovery task.
866 */
867 offset = 0;
868 r_ctl = FC_RCTL_DD_SOL_DATA;
869 }
870 }
871
872 if (srr_needed)
873 qedf_send_srr(orig_io_req, offset, r_ctl);
874 else
875 qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
876 }
877
878 out_free_frame:
879 fc_frame_free(fp);
880 out_put:
881 /* Put reference for original command since REC completed */
882 kref_put(&orig_io_req->refcount, qedf_release_cmd);
883 out_free:
884 kfree(cb_arg);
885 }
886
887 /* Assumes kref is already held by caller */
888 int qedf_send_rec(struct qedf_ioreq *orig_io_req)
889 {
890
891 struct fc_els_rec rec;
892 struct qedf_rport *fcport;
893 struct fc_lport *lport;
894 struct qedf_els_cb_arg *cb_arg = NULL;
895 struct qedf_ctx *qedf;
896 uint32_t sid;
897 uint32_t r_a_tov;
898 int rc;
899
900 if (!orig_io_req) {
901 QEDF_ERR(NULL, "orig_io_req is NULL.\n");
902 return -EINVAL;
903 }
904
905 fcport = orig_io_req->fcport;
906
907 /* Check that fcport is still offloaded */
908 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
909 QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
910 return -EINVAL;
911 }
912
913 if (!fcport->qedf) {
914 QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
915 return -EINVAL;
916 }
917
918 /* Take reference until REC command completion */
919 kref_get(&orig_io_req->refcount);
920
921 qedf = fcport->qedf;
922 lport = qedf->lport;
923 sid = fcport->sid;
924 r_a_tov = lport->r_a_tov;
925
926 memset(&rec, 0, sizeof(rec));
927
928 cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
929 if (!cb_arg) {
930 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
931 "REC\n");
932 rc = -ENOMEM;
933 goto rec_err;
934 }
935
936 cb_arg->aborted_io_req = orig_io_req;
937
938 rec.rec_cmd = ELS_REC;
939 hton24(rec.rec_s_id, sid);
940 rec.rec_ox_id = htons(orig_io_req->xid);
941 rec.rec_rx_id =
942 htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);
943
944 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
945 "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
946 orig_io_req->xid, rec.rec_rx_id);
947 rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
948 qedf_rec_compl, cb_arg, r_a_tov);
949
950 rec_err:
951 if (rc) {
952 QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
953 "=0x%x\n", orig_io_req->xid);
954 kfree(cb_arg);
955 kref_put(&orig_io_req->refcount, qedf_release_cmd);
956 }
957 return rc;
958 }
959