/*
 *  QLogic FCoE Offload Driver
 *  Copyright (c) 2016-2018 Cavium Inc.
 *
 *  This software is available under the terms of the GNU General Public License
 *  (GPL) Version 2, available from the file COPYING in the main directory of
 *  this source tree.
 */
#include "qedf.h"

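/*
 * Generic middle-path (MP) ELS transmit routine: allocates a QEDF_ELS
 * command, copies the caller's payload into the MP request buffer,
 * builds the FC header, initializes a midpath task context and rings
 * the doorbell on the fcport's send queue.  cb_func is invoked with
 * cb_arg from ELS completion (or timeout) processing.
 */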
/* It's assumed that the lock is held when calling this function. */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL");
		rc = -EINVAL;
		goto els_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);
		rc = -EAGAIN;
		goto els_err;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
			  "Failed to alloc ELS request 0x%x\n", op);
		rc = -ENOMEM;
		goto els_err;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		   els_req->xid);
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on original I/O request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		   "req\n");
	qedf_ring_doorbell(fcport);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}

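/*
 * ELS completion handler, called from CQE processing.  Cancels the ELS
 * timer, records the response length reported in the midpath CQE and
 * invokes the callback registered in qedf_initiate_els() before
 * dropping the command reference.
 */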
void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req)
{
	struct fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	uint16_t xid;
	struct fcoe_cqe_midpath_info *mp_info;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
		   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

	/* Kill the ELS timer */
	cancel_delayed_work(&els_req->timeout_work);

	xid = els_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = els_req->sc_cmd;

	/* Get ELS response length from CQE */
	mp_info = &cqe->cqe_info.midpath_info;
	els_req->mp_req.resp_len = mp_info->data_placement_size;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}

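/*
 * RRQ completion callback.  Drops the reference that qedf_send_rrq()
 * held on the aborted I/O request, returning it to the command pool.
 */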
static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rrq_req;
	struct qedf_ctx *qedf;
	int refcount;

	rrq_req = cb_arg->io_req;
	qedf = rrq_req->fcport->qedf;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);

	/* This should return the aborted io_req to the command pool */
	if (orig_io_req)
		kref_put(&orig_io_req->refcount, qedf_release_cmd);

out_free:
	/*
	 * Release a reference to the rrq request if we timed out as the
	 * rrq completion handler is called directly from the timeout handler
	 * and not from els_compl where the reference would have normally been
	 * released.
	 */
	if (rrq_req->event == QEDF_IOREQ_EV_ELS_TMO)
		kref_put(&rrq_req->refcount, qedf_release_cmd);
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!aborted_io_req) {
		QEDF_ERR(NULL, "abort_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = aborted_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
		   "io = %p, orig_xid = 0x%x\n", aborted_io_req,
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id =
	    htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);

	rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
	    qedf_rrq_compl, cb_arg, r_a_tov);

rrq_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
			  "req 0x%x\n", aborted_io_req->xid);
		kfree(cb_arg);
		kref_put(&aborted_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}

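/*
 * Hand an offloaded ELS response back to libfc as if it had been
 * received as an L2 frame: restore the OX_ID libfc assigned, rebuild
 * the header and frame attributes, and pass the frame to
 * fc_exch_recv().
 */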
static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
					struct fc_frame *fp,
					u16 l2_oxid)
{
	struct fc_lport *lport = fcport->qedf->lport;
	struct fc_frame_header *fh;
	u32 crc;

	fh = (struct fc_frame_header *)fc_frame_header_get(fp);

	/* Set the OXID we return to what libfc used */
	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	/* Setup header fields */
	fh->fh_r_ctl = FC_RCTL_ELS_REP;
	fh->fh_type = FC_TYPE_ELS;
	/* Last sequence, end sequence */
	fh->fh_f_ctl[0] = 0x98;
	hton24(fh->fh_d_id, lport->port_id);
	hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
	fh->fh_rx_id = 0xffff;

	/* Set frame attributes */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/* Send completed request to libfc */
	fc_exch_recv(lport, fp);
}

/*
 * In instances where an ELS command times out we may need to restart the
 * rport by logging out and then logging back in.
 */
void qedf_restart_rport(struct qedf_rport *fcport)
{
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u32 port_id;

	if (!fcport)
		return;

	if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
	    !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
		    fcport);
		return;
	}

	/* Set that we are now in reset */
	set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);

	rdata = fcport->rdata;
	if (rdata) {
		lport = fcport->qedf->lport;
		port_id = rdata->ids.port_id;
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "LOGO port_id=%x.\n", port_id);
		fc_rport_logoff(rdata);
		/* Recreate the rport and log back in */
		rdata = fc_rport_create(lport, port_id);
		if (rdata)
			fc_rport_login(rdata);
	}
	clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}

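/*
 * Completion callback for ELS commands that libfc originated (e.g.
 * ADISC).  Copies the firmware response into a newly allocated
 * fc_frame and returns it to libfc via qedf_process_l2_frame_compl().
 */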
static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *els_req;
	struct qedf_rport *fcport;
	struct qedf_mp_req *mp_req;
	struct fc_frame *fp;
	struct fc_frame_header *fh, *mp_fc_hdr;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	u16 l2_oxid;

	l2_oxid = cb_arg->l2_oxid;
	els_req = cb_arg->io_req;

	if (!els_req) {
		QEDF_ERR(NULL, "els_req is NULL.\n");
		goto free_arg;
	}

	/*
	 * If we are flushing the command just free the cb_arg as none of the
	 * response data will be valid.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
		goto free_arg;

	fcport = els_req->fcport;
	mp_req = &(els_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	/*
	 * If a middle path ELS command times out, don't try to return
	 * the frame; just do any internal cleanup and let libfc time out
	 * the command and clean up its internal resources.
	 */
	if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
		/*
		 * If ADISC times out, libfc will timeout the exchange and then
		 * try to send a PLOGI which will timeout since the session is
		 * still offloaded.  Force libfc to logout the session which
		 * will upload the connection and allow the PLOGI response to
		 * flow over the LL2 path.
		 */
		if (cb_arg->op == ELS_ADISC)
			qedf_restart_rport(fcport);
		return;
	}

	if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
		   "beyond page size.\n");
		goto free_arg;
	}

	fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		return;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
	qedf_process_l2_frame_compl(fcport, fp, l2_oxid);

free_arg:
	kfree(cb_arg);
}

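/*
 * Send a libfc-built ADISC over the offloaded session, saving libfc's
 * OX_ID so the response can be completed back on the original
 * exchange.
 */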
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
	    qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}

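/*
 * SRR completion callback.  On LS_ACC the target will retransmit the
 * requested data or status; on LS_RJT the original I/O is aborted via
 * ABTS.
 */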
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, srr_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

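/*
 * Send an FCP Sequence Retransmission Request (SRR) asking the target
 * to resend data or status for the original exchange, starting at the
 * given relative offset.
 */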
static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	u32 sid, r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until SRR command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
		   "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;

	rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
	    qedf_srr_compl, cb_arg, r_a_tov);

srr_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		/* If we fail to queue SRR, send ABTS to orig_io */
		qedf_initiate_abts(orig_io_req, true);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	} else
		/* Tell other threads that SRR is in progress */
		set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	return rc;
}

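/*
 * Post a firmware sequence-recovery (cleanup) task for the original
 * I/O request.  The SRR itself is sent once the cleanup completes, in
 * qedf_process_seq_cleanup_compl().
 */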
static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
	u32 offset, u8 r_ctl)
{
	struct qedf_rport *fcport;
	unsigned long flags;
	struct qedf_els_cb_arg *cb_arg;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = orig_io_req->fcport;

	QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
	    "Doing sequence cleanup for xid=0x%x offset=%u.\n",
	    orig_io_req->xid, offset);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
			  "for sequence cleanup\n");
		return;
	}

	/* Get reference for cleanup request */
	kref_get(&orig_io_req->refcount);

	orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
	cb_arg->offset = offset;
	cb_arg->r_ctl = r_ctl;
	orig_io_req->cb_arg = cb_arg;

	qedf_cmd_timer_set(fcport->qedf, orig_io_req,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	orig_io_req->task_params->sqe = sqe;

	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
						   offset);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}

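/*
 * Completion handler for the sequence-cleanup task posted above.  If
 * the cleanup completed rather than timed out, follow up with an SRR
 * using the offset and R_CTL stashed in the callback argument.
 */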
void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
{
	int rc;
	struct qedf_els_cb_arg *cb_arg;

	cb_arg = io_req->cb_arg;

	/* If we timed out just free resources */
	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
		goto free;

	/* Kill the timer we put on the request */
	cancel_delayed_work_sync(&io_req->timeout_work);

	rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
	if (rc)
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
		    "abort, xid=0x%x.\n", io_req->xid);
free:
	kfree(cb_arg);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

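/*
 * Reissue a lost SCSI command on a new exchange (the REC "command
 * lost" case).  The sc_cmd is transferred to a freshly allocated
 * io_req and the original exchange is aborted without completing the
 * SCSI command, since it has been reissued on another OX_ID.
 */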
static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
{
	struct qedf_rport *fcport;
	struct qedf_ioreq *new_io_req;
	unsigned long flags;
	bool rc = false;

	fcport = orig_io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		goto out;
	}

	if (!orig_io_req->sc_cmd) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
		    "xid=0x%x.\n", orig_io_req->xid);
		goto out;
	}

	new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!new_io_req) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
		    "io_req.\n");
		goto out;
	}

	new_io_req->sc_cmd = orig_io_req->sc_cmd;

	/*
	 * This keeps the sc_cmd struct from being returned to the tape
	 * driver and being requeued twice. We do need to put a reference
	 * for the original I/O request since we will not do a SCSI completion
	 * for it.
	 */
	orig_io_req->sc_cmd = NULL;
	kref_put(&orig_io_req->refcount, qedf_release_cmd);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* kref for new command released in qedf_post_io_req on error */
	if (qedf_post_io_req(fcport, new_io_req)) {
		QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
		/* Return SQE to pool */
		atomic_inc(&fcport->free_sqes);
	} else {
		QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Reissued SCSI command from orig_xid=0x%x on "
		    "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
		/*
		 * Abort the original I/O but do not return SCSI command as
		 * it has been reissued on another OX_ID.
		 */
		spin_unlock_irqrestore(&fcport->rport_lock, flags);
		qedf_initiate_abts(orig_io_req, false);
		goto out;
	}

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
out:
	return rc;
}

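/*
 * REC (Read Exchange Concise) completion callback.  An LS_RJT with an
 * OX_ID/RX_ID explanation means the command was lost and is requeued
 * on a new exchange; on LS_ACC the exchange status determines whether
 * to send an SRR or a sequence-cleanup task to recover the exchange.
 */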
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		   " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		   orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange.  We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For the read case we always set the offset
				 * to 0 for the sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}

/* Assumes kref is already held by caller */
int qedf_send_rec(struct qedf_ioreq *orig_io_req)
{
	struct fc_els_rec rec;
	struct qedf_rport *fcport;
	struct fc_lport *lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t sid;
	uint32_t r_a_tov;
	int rc;

	if (!orig_io_req) {
		QEDF_ERR(NULL, "orig_io_req is NULL.\n");
		return -EINVAL;
	}

	fcport = orig_io_req->fcport;

	/* Check that fcport is still offloaded */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return -EINVAL;
	}

	if (!fcport->qedf) {
		QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
		return -EINVAL;
	}

	/* Take reference until REC command completion */
	kref_get(&orig_io_req->refcount);

	qedf = fcport->qedf;
	lport = qedf->lport;
	sid = fcport->sid;
	r_a_tov = lport->r_a_tov;

	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			  "REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id =
	    htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
	   "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
	   orig_io_req->xid, rec.rec_rx_id);
	rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
	    qedf_rec_compl, cb_arg, r_a_tov);

rec_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
			  "=0x%x\n", orig_io_req->xid);
		kfree(cb_arg);
		kref_put(&orig_io_req->refcount, qedf_release_cmd);
	}
	return rc;
}
988