// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"

#define enable_tsend_auto_resp(efct)	1
#define enable_treceive_auto_resp(efct)	0

#define SCSI_IOFMT "[%04x][i:%04x t:%04x h:%04x]"

#define scsi_io_printf(io, fmt, ...) \
	efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \
		      io->node->display_name, io->instance_index,\
		      io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)

#define EFCT_LOG_ENABLE_SCSI_TRACE(efct) \
	(((efct) != NULL) ? (((efct)->logmask & (1U << 2)) != 0) : 0)

#define scsi_io_trace(io, fmt, ...) \
	do { \
		if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \
			scsi_io_printf(io, fmt, ##__VA_ARGS__); \
	} while (0)

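/*
 * Allocate a target SCSI IO from the transport IO pool, initialize its
 * refcount, take a reference on the owning node, and add the IO to the
 * node's active_ios list.
 */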
struct efct_io *
efct_scsi_io_alloc(struct efct_node *node)
{
	struct efct *efct;
	struct efct_xport *xport;
	struct efct_io *io;
	unsigned long flags;

	efct = node->efct;

	xport = efct->xport;

	spin_lock_irqsave(&node->active_ios_lock, flags);

	io = efct_io_pool_io_alloc(efct->xport->io_pool);
	if (!io) {
		efc_log_err(efct, "IO alloc failed\n");
		atomic_add_return(1, &xport->io_alloc_failed_count);
		spin_unlock_irqrestore(&node->active_ios_lock, flags);
		return NULL;
	}

	/* initialize refcount */
	kref_init(&io->ref);
	io->release = _efct_scsi_io_free;

	/* set generic fields */
	io->efct = efct;
	io->node = node;
	kref_get(&node->ref);

	/* set type and name */
	io->io_type = EFCT_IO_TYPE_IO;
	io->display_name = "scsi_io";

	io->cmd_ini = false;
	io->cmd_tgt = true;

	/* Add to node's active_ios list */
	INIT_LIST_HEAD(&io->list_entry);
	list_add(&io->list_entry, &node->active_ios);

	spin_unlock_irqrestore(&node->active_ios_lock, flags);

	return io;
}

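/*
 * kref release handler: remove the IO from the node's active_ios list,
 * drop the node reference taken in efct_scsi_io_alloc(), and return the
 * IO to the transport IO pool.
 */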
void
_efct_scsi_io_free(struct kref *arg)
{
	struct efct_io *io = container_of(arg, struct efct_io, ref);
	struct efct *efct = io->efct;
	struct efct_node *node = io->node;
	unsigned long flags = 0;

	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);

	if (io->io_free) {
		efc_log_err(efct, "IO already freed.\n");
		return;
	}

	spin_lock_irqsave(&node->active_ios_lock, flags);
	list_del_init(&io->list_entry);
	spin_unlock_irqrestore(&node->active_ios_lock, flags);

	kref_put(&node->ref, node->release);
	io->node = NULL;
	efct_io_pool_io_free(efct->xport->io_pool, io);
}

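/*
 * Drop the caller's reference on the IO; the IO is actually torn down by
 * _efct_scsi_io_free() once the last reference is released.
 */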
void
efct_scsi_io_free(struct efct_io *io)
{
	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
	WARN_ON(!refcount_read(&io->ref.refcount));
	kref_put(&io->ref, io->release);
}

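/*
 * HW completion callback for target data/response phases: translate the
 * SLI4 WCQE status into an efct_scsi_io_status, invoke the target server
 * callback, and then process the pending-IO list.
 */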
static void
efct_target_io_cb(struct efct_hw_io *hio, u32 length, int status,
		  u32 ext_status, void *app)
{
	u32 flags = 0;
	struct efct_io *io = app;
	struct efct *efct;
	enum efct_scsi_io_status scsi_stat = EFCT_SCSI_STATUS_GOOD;
	efct_scsi_io_cb_t cb;

	if (!io || !io->efct) {
		pr_err("%s: IO cannot be NULL\n", __func__);
		return;
	}

	scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);

	efct = io->efct;

	io->transferred += length;

	if (!io->scsi_tgt_cb) {
		efct_scsi_check_pending(efct);
		return;
	}

	/* Call target server completion */
	cb = io->scsi_tgt_cb;

	/* Clear the callback before invoking the callback */
	io->scsi_tgt_cb = NULL;

	/* if the status was good and auto-good-response was set, then call
	 * the target server back with IO_CMPL_RSP_SENT, otherwise send
	 * IO_CMPL
	 */
	if (status == 0 && io->auto_resp)
		flags |= EFCT_SCSI_IO_CMPL_RSP_SENT;
	else
		flags |= EFCT_SCSI_IO_CMPL;

	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		scsi_stat = EFCT_SCSI_STATUS_GOOD;
		break;
	case SLI4_FC_WCQE_STATUS_DI_ERROR:
		if (ext_status & SLI4_FC_DI_ERROR_GE)
			scsi_stat = EFCT_SCSI_STATUS_DIF_GUARD_ERR;
		else if (ext_status & SLI4_FC_DI_ERROR_AE)
			scsi_stat = EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR;
		else if (ext_status & SLI4_FC_DI_ERROR_RE)
			scsi_stat = EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR;
		else
			scsi_stat = EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR;
		break;
	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
		switch (ext_status) {
		case SLI4_FC_LOCAL_REJECT_INVALID_RELOFFSET:
		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:
			scsi_stat = EFCT_SCSI_STATUS_ABORTED;
			break;
		case SLI4_FC_LOCAL_REJECT_INVALID_RPI:
			scsi_stat = EFCT_SCSI_STATUS_NEXUS_LOST;
			break;
		case SLI4_FC_LOCAL_REJECT_NO_XRI:
			scsi_stat = EFCT_SCSI_STATUS_NO_IO;
			break;
		default:
			/* we have seen 0x0d (TX_DMA_FAILED err) */
			scsi_stat = EFCT_SCSI_STATUS_ERROR;
			break;
		}
		break;

	case SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT:
		/* target IO timed out */
		scsi_stat = EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED;
		break;

	case SLI4_FC_WCQE_STATUS_SHUTDOWN:
		/* Target IO cancelled by HW */
		scsi_stat = EFCT_SCSI_STATUS_SHUTDOWN;
		break;

	default:
		scsi_stat = EFCT_SCSI_STATUS_ERROR;
		break;
	}

	cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);

	efct_scsi_check_pending(efct);
}

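/*
 * Initialize the HW IO's SGL for the given IO type and add one SGE per
 * entry in the supplied scatter/gather list.
 */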
static int
efct_scsi_build_sgls(struct efct_hw *hw, struct efct_hw_io *hio,
		     struct efct_scsi_sgl *sgl, u32 sgl_count,
		     enum efct_hw_io_type type)
{
	int rc;
	u32 i;
	struct efct *efct = hw->os;

	/* Initialize HW SGL */
	rc = efct_hw_io_init_sges(hw, hio, type);
	if (rc) {
		efc_log_err(efct, "efct_hw_io_init_sges failed: %d\n", rc);
		return -EIO;
	}

	for (i = 0; i < sgl_count; i++) {
		/* Add data SGE */
		rc = efct_hw_io_add_sge(hw, hio, sgl[i].addr, sgl[i].len);
		if (rc) {
			efc_log_err(efct, "add sge failed cnt=%d rc=%d\n",
				    sgl_count, rc);
			return rc;
		}
	}

	return 0;
}

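/*
 * Trace helper: dump the default SGL entries for an IO, stopping at the
 * SGE that has the "last" bit (bit 31 of the third word) set.
 */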
static void efc_log_sgl(struct efct_io *io)
{
	struct efct_hw_io *hio = io->hio;
	struct sli4_sge *data = NULL;
	u32 *dword = NULL;
	u32 i;
	u32 n_sge;

	scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
		      upper_32_bits(hio->def_sgl.phys),
		      lower_32_bits(hio->def_sgl.phys));
	n_sge = (hio->sgl == &hio->def_sgl) ? hio->n_sge : hio->def_sgl_count;
	for (i = 0, data = hio->def_sgl.virt; i < n_sge; i++, data++) {
		dword = (u32 *)data;

		scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
			      i, dword[0], dword[1], dword[2], dword[3]);

		if (dword[2] & (1U << 31))
			break;
	}
}

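/*
 * Callback used when a dispatch attempt fails: it runs in the NOP mailbox
 * completion context (see efct_hw_async_call() callers below) and reports
 * a dispatch error through the saved HW callback.
 */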
static void
efct_scsi_check_pending_async_cb(struct efct_hw *hw, int status,
				 u8 *mqe, void *arg)
{
	struct efct_io *io = arg;

	if (io) {
		efct_hw_done_t cb = io->hw_cb;

		if (!io->hw_cb)
			return;

		io->hw_cb = NULL;
		(cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
	}
}

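/*
 * Dispatch an IO that has been paired with a HW IO: record the HW tags,
 * copy WQ steering, build the SGL, and send the IO to the hardware.
 */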
static int
efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio)
{
	int rc = 0;
	struct efct *efct = io->efct;

	/* Got a HW IO;
	 * update ini/tgt_task_tag with HW IO info and dispatch
	 */
	io->hio = hio;
	if (io->cmd_tgt)
		io->tgt_task_tag = hio->indicator;
	else if (io->cmd_ini)
		io->init_task_tag = hio->indicator;
	io->hw_tag = hio->reqtag;

	hio->eq = io->hw_priv;

	/* Copy WQ steering */
	switch (io->wq_steering) {
	case EFCT_SCSI_WQ_STEERING_CLASS >> EFCT_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = EFCT_HW_WQ_STEERING_CLASS;
		break;
	case EFCT_SCSI_WQ_STEERING_REQUEST >> EFCT_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = EFCT_HW_WQ_STEERING_REQUEST;
		break;
	case EFCT_SCSI_WQ_STEERING_CPU >> EFCT_SCSI_WQ_STEERING_SHIFT:
		hio->wq_steering = EFCT_HW_WQ_STEERING_CPU;
		break;
	}

	switch (io->io_type) {
	case EFCT_IO_TYPE_IO:
		rc = efct_scsi_build_sgls(&efct->hw, io->hio,
					  io->sgl, io->sgl_count, io->hio_type);
		if (rc)
			break;

		if (EFCT_LOG_ENABLE_SCSI_TRACE(efct))
			efc_log_sgl(io);

		if (io->app_id)
			io->iparam.fcp_tgt.app_id = io->app_id;

		io->iparam.fcp_tgt.vpi = io->node->vpi;
		io->iparam.fcp_tgt.rpi = io->node->rpi;
		io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
		io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
		io->iparam.fcp_tgt.xmit_len = io->wire_len;

		rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio,
				     &io->iparam, io->hw_cb, io);
		break;
	default:
		scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
		rc = -EIO;
		break;
	}
	return rc;
}

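/*
 * Dispatch an IO that does not require a HW IO of its own; currently this
 * only handles aborts, which operate on the HW IO of the IO being aborted.
 */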
static int
efct_scsi_io_dispatch_no_hw_io(struct efct_io *io)
{
	int rc;

	switch (io->io_type) {
	case EFCT_IO_TYPE_ABORT: {
		struct efct_hw_io *hio_to_abort = NULL;

		hio_to_abort = io->io_to_abort->hio;

		if (!hio_to_abort) {
			/*
			 * If "IO to abort" does not have an
			 * associated HW IO, immediately make callback with
			 * success. The command must have been sent to
			 * the backend, but the data phase has not yet
			 * started, so we don't have a HW IO.
			 *
			 * Note: since the backend shims should be
			 * taking a reference on io_to_abort, it should not
			 * be possible to have been completed and freed by
			 * the backend before the abort got here.
			 */
			scsi_io_printf(io, "IO: not active\n");
			((efct_hw_done_t)io->hw_cb)(io->hio, 0,
					SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
			rc = 0;
			break;
		}

		/* HW IO is valid, abort it */
		scsi_io_printf(io, "aborting\n");
		rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
				      io->send_abts, io->hw_cb, io);
		if (rc) {
			int status = SLI4_FC_WCQE_STATUS_SUCCESS;
			efct_hw_done_t cb = io->hw_cb;

			if (rc != -ENOENT && rc != -EINPROGRESS) {
				status = -1;
				scsi_io_printf(io, "Failed to abort IO rc=%d\n",
					       rc);
			}
			cb(io->hio, 0, status, 0, io);
			rc = 0;
		}

		break;
	}
	default:
		scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
		rc = -EIO;
		break;
	}
	return rc;
}

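/*
 * Pull the first IO off the transport pending list, pair it with a HW IO
 * if one is required and available, and dispatch it. Returns the IO that
 * was dispatched, or NULL if the list is empty or no HW IO is available.
 */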
static struct efct_io *
efct_scsi_dispatch_pending(struct efct *efct)
{
	struct efct_xport *xport = efct->xport;
	struct efct_io *io = NULL;
	struct efct_hw_io *hio;
	unsigned long flags = 0;
	int status;

	spin_lock_irqsave(&xport->io_pending_lock, flags);

	if (!list_empty(&xport->io_pending_list)) {
		io = list_first_entry(&xport->io_pending_list, struct efct_io,
				      io_pending_link);
		list_del_init(&io->io_pending_link);
	}

	if (!io) {
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);
		return NULL;
	}

	if (io->io_type == EFCT_IO_TYPE_ABORT) {
		hio = NULL;
	} else {
		hio = efct_hw_io_alloc(&efct->hw);
		if (!hio) {
			/*
			 * No HW IO available. Put the IO back on
			 * the front of the pending list
			 */
			list_add(&io->io_pending_link, &xport->io_pending_list);
			io = NULL;
		} else {
			hio->eq = io->hw_priv;
		}
	}

	/* Must drop the lock before dispatching the IO */
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	if (!io)
		return NULL;

	/*
	 * We pulled an IO off the pending list,
	 * and either got an HW IO or don't need one
	 */
	atomic_sub_return(1, &xport->io_pending_count);
	if (!hio)
		status = efct_scsi_io_dispatch_no_hw_io(io);
	else
		status = efct_scsi_io_dispatch_hw_io(io, hio);
	if (status) {
		/*
		 * Invoke the HW callback, but do so in a separate
		 * execution context provided by the NOP mailbox
		 * completion processing context, by using
		 * efct_hw_async_call()
		 */
		if (efct_hw_async_call(&efct->hw,
				       efct_scsi_check_pending_async_cb, io)) {
			efc_log_debug(efct, "call hw async failed\n");
		}
	}

	return io;
}

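/*
 * Drain the transport pending list. If nothing could be dispatched, look
 * for a pending abort whose target IO already has a HW IO and dispatch
 * that one, since such an abort does not need a new HW IO.
 */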
void
efct_scsi_check_pending(struct efct *efct)
{
	struct efct_xport *xport = efct->xport;
	struct efct_io *io = NULL;
	int count = 0;
	unsigned long flags = 0;
	int dispatch = 0;

	/* Guard against recursion */
	if (atomic_add_return(1, &xport->io_pending_recursing) > 1) {
		/* This function is already running. Decrement and return. */
		atomic_sub_return(1, &xport->io_pending_recursing);
		return;
	}

	while (efct_scsi_dispatch_pending(efct))
		count++;

	if (count) {
		atomic_sub_return(1, &xport->io_pending_recursing);
		return;
	}

	/*
	 * If nothing was removed from the list,
	 * we might be in a case where we need to abort an
	 * active IO and the abort is on the pending list.
	 * Look for an abort we can dispatch.
	 */

	spin_lock_irqsave(&xport->io_pending_lock, flags);

	list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
		if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
			/* This IO has a HW IO, so it is
			 * active. Dispatch the abort.
			 */
			dispatch = 1;
			list_del_init(&io->io_pending_link);
			atomic_sub_return(1, &xport->io_pending_count);
			break;
		}
	}

	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	if (dispatch) {
		if (efct_scsi_io_dispatch_no_hw_io(io)) {
			if (efct_hw_async_call(&efct->hw,
					       efct_scsi_check_pending_async_cb, io)) {
				efc_log_debug(efct, "hw async failed\n");
			}
		}
	}

	atomic_sub_return(1, &xport->io_pending_recursing);
}

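/*
 * Main dispatch entry point for target IOs: if the IO already has a HW IO,
 * send it immediately; otherwise queue it on the pending list (or allocate
 * a HW IO directly if the list is empty) and process the pending list.
 */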
int
efct_scsi_io_dispatch(struct efct_io *io, void *cb)
{
	struct efct_hw_io *hio;
	struct efct *efct = io->efct;
	struct efct_xport *xport = efct->xport;
	unsigned long flags = 0;

	io->hw_cb = cb;

	/*
	 * If this IO already has a HW IO, then this is not the first phase
	 * of the IO. Send it to the HW.
	 */
	if (io->hio)
		return efct_scsi_io_dispatch_hw_io(io, io->hio);

	/*
	 * We don't already have a HW IO associated with the IO. First check
	 * the pending list. If not empty, add IO to the tail and process the
	 * pending list.
	 */
	spin_lock_irqsave(&xport->io_pending_lock, flags);
	if (!list_empty(&xport->io_pending_list)) {
		/*
		 * If this is a low latency request,
		 * put it at the front of the IO pending
		 * queue, otherwise put it at the end of the queue.
		 */
		if (io->low_latency) {
			INIT_LIST_HEAD(&io->io_pending_link);
			list_add(&io->io_pending_link, &xport->io_pending_list);
		} else {
			INIT_LIST_HEAD(&io->io_pending_link);
			list_add_tail(&io->io_pending_link,
				      &xport->io_pending_list);
		}
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);
		atomic_add_return(1, &xport->io_pending_count);
		atomic_add_return(1, &xport->io_total_pending);

		/* process pending list */
		efct_scsi_check_pending(efct);
		return 0;
	}
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	/*
	 * We don't have a HW IO associated with the IO and there's nothing
	 * on the pending list. Attempt to allocate a HW IO and dispatch it.
	 */
	hio = efct_hw_io_alloc(&io->efct->hw);
	if (!hio) {
		/* Couldn't get a HW IO. Save this IO on the pending list */
		spin_lock_irqsave(&xport->io_pending_lock, flags);
		INIT_LIST_HEAD(&io->io_pending_link);
		list_add_tail(&io->io_pending_link, &xport->io_pending_list);
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);

		atomic_add_return(1, &xport->io_total_pending);
		atomic_add_return(1, &xport->io_pending_count);
		return 0;
	}

	/* We successfully allocated a HW IO; dispatch to HW */
	return efct_scsi_io_dispatch_hw_io(io, hio);
}

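/*
 * Dispatch an abort. Aborts never need their own HW IO, but they are
 * routed through the pending list when it is non-empty so that ordering
 * relative to queued IOs is preserved.
 */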
int
efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
{
	struct efct *efct = io->efct;
	struct efct_xport *xport = efct->xport;
	unsigned long flags = 0;

	io->hw_cb = cb;

	/*
	 * For aborts, we don't need a HW IO, but we still want
	 * to pass through the pending list to preserve ordering.
	 * Thus, if the pending list is not empty, add this abort
	 * to the pending list and process the pending list.
	 */
	spin_lock_irqsave(&xport->io_pending_lock, flags);
	if (!list_empty(&xport->io_pending_list)) {
		INIT_LIST_HEAD(&io->io_pending_link);
		list_add_tail(&io->io_pending_link, &xport->io_pending_list);
		spin_unlock_irqrestore(&xport->io_pending_lock, flags);
		atomic_add_return(1, &xport->io_pending_count);
		atomic_add_return(1, &xport->io_total_pending);

		/* process pending list */
		efct_scsi_check_pending(efct);
		return 0;
	}
	spin_unlock_irqrestore(&xport->io_pending_lock, flags);

	/* nothing on pending list, dispatch abort */
	return efct_scsi_io_dispatch_no_hw_io(io);
}

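/*
 * Common helper for target data transfers (reads and writes): set up the
 * FCP target parameters, clamp the wire length to the remaining expected
 * transfer, trim the SGL on overrun, optionally enable auto-good-response
 * on the last data phase, and dispatch the IO.
 */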
static inline int
efct_scsi_xfer_data(struct efct_io *io, u32 flags,
		    struct efct_scsi_sgl *sgl, u32 sgl_count, u64 xwire_len,
		    enum efct_hw_io_type type, int enable_ar,
		    efct_scsi_io_cb_t cb, void *arg)
{
	struct efct *efct;
	size_t residual = 0;

	io->sgl_count = sgl_count;

	efct = io->efct;

	scsi_io_trace(io, "%s wire_len %llu\n",
		      (type == EFCT_HW_IO_TARGET_READ) ? "send" : "recv",
		      xwire_len);

	io->hio_type = type;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	residual = io->exp_xfer_len - io->transferred;
	io->wire_len = (xwire_len < residual) ? xwire_len : residual;
	residual = (xwire_len - io->wire_len);

	memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = io->transferred;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	/* if this is the last data phase and there is no residual, enable
	 * auto-good-response
	 */
	if (enable_ar && (flags & EFCT_SCSI_LAST_DATAPHASE) && residual == 0 &&
	    ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
	    (!(flags & EFCT_SCSI_NO_AUTO_RESPONSE))) {
		io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
		io->auto_resp = true;
	} else {
		io->auto_resp = false;
	}

	/* save this transfer length */
	io->xfer_req = io->wire_len;

	/* Adjust the transferred count to account for overrun
	 * when the residual is calculated in efct_scsi_send_resp
	 */
	io->transferred += residual;

	/* Adjust the SGL size if there is overrun */

	if (residual) {
		struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];

		while (residual) {
			size_t len = sgl_ptr->len;

			if (len > residual) {
				sgl_ptr->len = len - residual;
				residual = 0;
			} else {
				sgl_ptr->len = 0;
				residual -= len;
				io->sgl_count--;
			}
			sgl_ptr--;
		}
	}

	/* Set latency and WQ steering */
	io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
	io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
				EFCT_SCSI_WQ_STEERING_SHIFT;
	io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
				EFCT_SCSI_WQ_CLASS_SHIFT;

	if (efct->xport) {
		struct efct_xport *xport = efct->xport;

		if (type == EFCT_HW_IO_TARGET_READ) {
			xport->fcp_stats.input_requests++;
			xport->fcp_stats.input_bytes += xwire_len;
		} else if (type == EFCT_HW_IO_TARGET_WRITE) {
			xport->fcp_stats.output_requests++;
			xport->fcp_stats.output_bytes += xwire_len;
		}
	}
	return efct_scsi_io_dispatch(io, efct_target_io_cb);
}

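/*
 * Send read data to the initiator / receive write data from the initiator.
 * Both are thin wrappers around efct_scsi_xfer_data(); auto-good-response
 * is enabled for target reads and disabled for target writes (see the
 * enable_*_auto_resp macros at the top of this file).
 */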
int
efct_scsi_send_rd_data(struct efct_io *io, u32 flags,
		       struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
		       efct_scsi_io_cb_t cb, void *arg)
{
	return efct_scsi_xfer_data(io, flags, sgl, sgl_count,
				   len, EFCT_HW_IO_TARGET_READ,
				   enable_tsend_auto_resp(io->efct), cb, arg);
}

int
efct_scsi_recv_wr_data(struct efct_io *io, u32 flags,
		       struct efct_scsi_sgl *sgl, u32 sgl_count, u64 len,
		       efct_scsi_io_cb_t cb, void *arg)
{
	return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len,
				   EFCT_HW_IO_TARGET_WRITE,
				   enable_treceive_auto_resp(io->efct), cb, arg);
}

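/*
 * Send the FCP response for a command. If there is no SCSI status,
 * residual, or sense data to report, the response is sent as an
 * auto-good-response; otherwise an explicit FCP_RSP payload is built in
 * the IO's response buffer and transmitted.
 */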
int
efct_scsi_send_resp(struct efct_io *io, u32 flags,
		    struct efct_scsi_cmd_resp *rsp,
		    efct_scsi_io_cb_t cb, void *arg)
{
	struct efct *efct;
	int residual;
	/* Always try auto resp */
	bool auto_resp = true;
	u8 scsi_status = 0;
	u16 scsi_status_qualifier = 0;
	u8 *sense_data = NULL;
	u32 sense_data_length = 0;

	efct = io->efct;

	if (rsp) {
		scsi_status = rsp->scsi_status;
		scsi_status_qualifier = rsp->scsi_status_qualifier;
		sense_data = rsp->sense_data;
		sense_data_length = rsp->sense_data_length;
		residual = rsp->residual;
	} else {
		residual = io->exp_xfer_len - io->transferred;
	}

	io->wire_len = 0;
	io->hio_type = EFCT_HW_IO_TARGET_RSP;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = 0;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	/* Set low latency queueing request */
	io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
	io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
				EFCT_SCSI_WQ_STEERING_SHIFT;
	io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
				EFCT_SCSI_WQ_CLASS_SHIFT;

	if (scsi_status != 0 || residual || sense_data_length) {
		struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt;
		u8 *sns_data;

		if (!fcprsp) {
			efc_log_err(efct, "NULL response buffer\n");
			return -EIO;
		}

		sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp);

		auto_resp = false;

		memset(fcprsp, 0, sizeof(*fcprsp));

		io->wire_len += sizeof(*fcprsp);

		fcprsp->resp.fr_status = scsi_status;
		fcprsp->resp.fr_retry_delay =
			cpu_to_be16(scsi_status_qualifier);

		/* set residual status if necessary */
		if (residual != 0) {
			/* FCP: if the data transferred is less than the
			 * amount expected, then this is an underflow.
			 * If the data transferred would have been greater
			 * than the amount expected, this is an overflow
			 */
			if (residual > 0) {
				fcprsp->resp.fr_flags |= FCP_RESID_UNDER;
				fcprsp->ext.fr_resid = cpu_to_be32(residual);
			} else {
				fcprsp->resp.fr_flags |= FCP_RESID_OVER;
				fcprsp->ext.fr_resid = cpu_to_be32(-residual);
			}
		}

		if (EFCT_SCSI_SNS_BUF_VALID(sense_data) && sense_data_length) {
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE) {
				efc_log_err(efct, "Sense exceeds max size.\n");
				return -EIO;
			}

			fcprsp->resp.fr_flags |= FCP_SNS_LEN_VAL;
			memcpy(sns_data, sense_data, sense_data_length);
			fcprsp->ext.fr_sns_len = cpu_to_be32(sense_data_length);
			io->wire_len += sense_data_length;
		}

		io->sgl[0].addr = io->rspbuf.phys;
		io->sgl[0].dif_addr = 0;
		io->sgl[0].len = io->wire_len;
		io->sgl_count = 1;
	}

	if (auto_resp)
		io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;

	return efct_scsi_io_dispatch(io, efct_target_io_cb);
}

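/*
 * HW completion callback for a BLS (BA_ACC) response: map the HW status to
 * a SCSI-style status, invoke the saved BLS callback, and process the
 * pending list.
 */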
static int
efct_target_bls_resp_cb(struct efct_hw_io *hio, u32 length, int status,
			u32 ext_status, void *app)
{
	struct efct_io *io = app;
	struct efct *efct;
	enum efct_scsi_io_status bls_status;

	efct = io->efct;

	/* BLS isn't really a "SCSI" concept, but use SCSI status */
	if (status) {
		io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
		bls_status = EFCT_SCSI_STATUS_ERROR;
	} else {
		bls_status = EFCT_SCSI_STATUS_GOOD;
	}

	if (io->bls_cb) {
		efct_scsi_io_cb_t bls_cb = io->bls_cb;
		void *bls_cb_arg = io->bls_cb_arg;

		io->bls_cb = NULL;
		io->bls_cb_arg = NULL;

		/* invoke callback */
		bls_cb(io, bls_status, 0, bls_cb_arg);
	}

	efct_scsi_check_pending(efct);
	return 0;
}

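/*
 * Build and send a BA_ACC in response to an ABTS (used when completing an
 * EFCT_SCSI_TMF_ABORT_TASK request).
 */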
static int
efct_target_send_bls_resp(struct efct_io *io,
			  efct_scsi_io_cb_t cb, void *arg)
{
	struct efct_node *node = io->node;
	struct sli_bls_params *bls = &io->iparam.bls;
	struct efct *efct = node->efct;
	struct fc_ba_acc *acc;
	int rc;

	/* fill out IO structure with everything needed to send BA_ACC */
	memset(&io->iparam, 0, sizeof(io->iparam));
	bls->ox_id = io->init_task_tag;
	bls->rx_id = io->abort_rx_id;
	bls->vpi = io->node->vpi;
	bls->rpi = io->node->rpi;
	bls->s_id = U32_MAX;
	bls->d_id = io->node->node_fc_id;
	bls->rpi_registered = true;

	acc = (void *)bls->payload;
	acc->ba_ox_id = cpu_to_be16(bls->ox_id);
	acc->ba_rx_id = cpu_to_be16(bls->rx_id);
	acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);

	/* generic io fields have already been populated */

	/* set type and BLS-specific fields */
	io->io_type = EFCT_IO_TYPE_BLS_RESP;
	io->display_name = "bls_rsp";
	io->hio_type = EFCT_HW_BLS_ACC;
	io->bls_cb = cb;
	io->bls_cb_arg = arg;

	/* dispatch IO */
	rc = efct_hw_bls_send(efct, FC_RCTL_BA_ACC, bls,
			      efct_target_bls_resp_cb, io);
	return rc;
}

static int efct_bls_send_rjt_cb(struct efct_hw_io *hio, u32 length, int status,
				u32 ext_status, void *app)
{
	struct efct_io *io = app;

	efct_scsi_io_free(io);
	return 0;
}

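/*
 * Build and send a BA_RJT for the exchange identified by the given FC
 * frame header. On failure, the IO is freed and NULL is returned.
 */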
struct efct_io *
efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr)
{
	struct efct_node *node = io->node;
	struct sli_bls_params *bls = &io->iparam.bls;
	struct efct *efct = node->efct;
	struct fc_ba_rjt *acc;
	int rc;

	/* fill out BLS Response-specific fields */
	io->io_type = EFCT_IO_TYPE_BLS_RESP;
	io->display_name = "ba_rjt";
	io->hio_type = EFCT_HW_BLS_RJT;
	io->init_task_tag = be16_to_cpu(hdr->fh_ox_id);

	/* fill out iparam fields */
	memset(&io->iparam, 0, sizeof(io->iparam));
	bls->ox_id = be16_to_cpu(hdr->fh_ox_id);
	bls->rx_id = be16_to_cpu(hdr->fh_rx_id);
	bls->vpi = io->node->vpi;
	bls->rpi = io->node->rpi;
	bls->s_id = U32_MAX;
	bls->d_id = io->node->node_fc_id;
	bls->rpi_registered = true;

	acc = (void *)bls->payload;
	acc->br_reason = ELS_RJT_UNAB;
	acc->br_explan = ELS_EXPL_NONE;

	rc = efct_hw_bls_send(efct, FC_RCTL_BA_RJT, bls, efct_bls_send_rjt_cb,
			      io);
	if (rc) {
		efc_log_err(efct, "efct_hw_bls_send() failed: %d\n", rc);
		efct_scsi_io_free(io);
		io = NULL;
	}
	return io;
}

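/*
 * Send a task-management response. An ABORT_TASK TMF is answered with a
 * BA_ACC; all other TMFs are answered with an FCP response carrying the
 * mapped FCP TMF response code.
 */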
int
efct_scsi_send_tmf_resp(struct efct_io *io,
			enum efct_scsi_tmf_resp rspcode,
			u8 addl_rsp_info[3],
			efct_scsi_io_cb_t cb, void *arg)
{
	int rc;
	struct {
		struct fcp_resp_with_ext rsp_ext;
		struct fcp_resp_rsp_info info;
	} *fcprsp;
	u8 fcp_rspcode;

	io->wire_len = 0;

	switch (rspcode) {
	case EFCT_SCSI_TMF_FUNCTION_COMPLETE:
		fcp_rspcode = FCP_TMF_CMPL;
		break;
	case EFCT_SCSI_TMF_FUNCTION_SUCCEEDED:
	case EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND:
		fcp_rspcode = FCP_TMF_CMPL;
		break;
	case EFCT_SCSI_TMF_FUNCTION_REJECTED:
		fcp_rspcode = FCP_TMF_REJECTED;
		break;
	case EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER:
		fcp_rspcode = FCP_TMF_INVALID_LUN;
		break;
	case EFCT_SCSI_TMF_SERVICE_DELIVERY:
		fcp_rspcode = FCP_TMF_FAILED;
		break;
	default:
		fcp_rspcode = FCP_TMF_REJECTED;
		break;
	}

	io->hio_type = EFCT_HW_IO_TARGET_RSP;

	io->scsi_tgt_cb = cb;
	io->scsi_tgt_cb_arg = arg;

	if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) {
		rc = efct_target_send_bls_resp(io, cb, arg);
		return rc;
	}

	/* populate the FCP TMF response */
	fcprsp = io->rspbuf.virt;
	memset(fcprsp, 0, sizeof(*fcprsp));

	/* the response carries FCP_RSP_INFO, so flag the rsp length valid */
	fcprsp->rsp_ext.resp.fr_flags |= FCP_RSP_LEN_VAL;

	if (addl_rsp_info) {
		memcpy(fcprsp->info._fr_resvd, addl_rsp_info,
		       sizeof(fcprsp->info._fr_resvd));
	}
	fcprsp->info.rsp_code = fcp_rspcode;

	io->wire_len = sizeof(*fcprsp);

	fcprsp->rsp_ext.ext.fr_rsp_len =
			cpu_to_be32(sizeof(struct fcp_resp_rsp_info));

	io->sgl[0].addr = io->rspbuf.phys;
	io->sgl[0].dif_addr = 0;
	io->sgl[0].len = io->wire_len;
	io->sgl_count = 1;

	memset(&io->iparam, 0, sizeof(io->iparam));
	io->iparam.fcp_tgt.ox_id = io->init_task_tag;
	io->iparam.fcp_tgt.offset = 0;
	io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
	io->iparam.fcp_tgt.timeout = io->timeout;

	rc = efct_scsi_io_dispatch(io, efct_target_io_cb);

	return rc;
}

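/*
 * Completion callback for the abort issued by efct_scsi_tgt_abort_io():
 * map the abort status, invoke the caller's abort callback, drop the
 * reference taken on the aborted IO, and free the abort IO itself.
 */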
static int
efct_target_abort_cb(struct efct_hw_io *hio, u32 length, int status,
		     u32 ext_status, void *app)
{
	struct efct_io *io = app;
	struct efct *efct;
	enum efct_scsi_io_status scsi_status;
	efct_scsi_io_cb_t abort_cb;
	void *abort_cb_arg;

	efct = io->efct;

	if (!io->abort_cb)
		goto done;

	abort_cb = io->abort_cb;
	abort_cb_arg = io->abort_cb_arg;

	io->abort_cb = NULL;
	io->abort_cb_arg = NULL;

	switch (status) {
	case SLI4_FC_WCQE_STATUS_SUCCESS:
		scsi_status = EFCT_SCSI_STATUS_GOOD;
		break;
	case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
		switch (ext_status) {
		case SLI4_FC_LOCAL_REJECT_NO_XRI:
			scsi_status = EFCT_SCSI_STATUS_NO_IO;
			break;
		case SLI4_FC_LOCAL_REJECT_ABORT_IN_PROGRESS:
			scsi_status = EFCT_SCSI_STATUS_ABORT_IN_PROGRESS;
			break;
		default:
			/* we have seen 0x15 (abort in progress) */
			scsi_status = EFCT_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case SLI4_FC_WCQE_STATUS_FCP_RSP_FAILURE:
		scsi_status = EFCT_SCSI_STATUS_CHECK_RESPONSE;
		break;
	default:
		scsi_status = EFCT_SCSI_STATUS_ERROR;
		break;
	}
	/* invoke callback */
	abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);

done:
	/* done with the IO to abort; drop the reference taken in
	 * efct_scsi_tgt_abort_io()
	 */
	kref_put(&io->io_to_abort->ref, io->io_to_abort->release);

	efct_io_pool_io_free(efct->xport->io_pool, io);

	efct_scsi_check_pending(efct);
	return 0;
}

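/*
 * Abort a target IO on behalf of the target server: take a reference on
 * the IO being aborted, allocate a separate abort IO directly from the IO
 * pool, and dispatch it through the abort path.
 */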
int
efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
{
	struct efct *efct;
	struct efct_xport *xport;
	int rc;
	struct efct_io *abort_io = NULL;

	efct = io->efct;
	xport = efct->xport;

	/* take a reference on IO being aborted */
	if (kref_get_unless_zero(&io->ref) == 0) {
		/* command no longer active */
		scsi_io_printf(io, "command no longer active\n");
		return -EIO;
	}

	/*
	 * Allocate a new IO to send the abort request. Use
	 * efct_io_pool_io_alloc() directly, as we need an IO object that
	 * will not fail allocation due to allocations being disabled
	 * (in efct_scsi_io_alloc())
	 */
	abort_io = efct_io_pool_io_alloc(efct->xport->io_pool);
	if (!abort_io) {
		atomic_add_return(1, &xport->io_alloc_failed_count);
		kref_put(&io->ref, io->release);
		return -EIO;
	}

	/* Save the target server callback and argument */
	/* set generic fields */
	abort_io->cmd_tgt = true;
	abort_io->node = io->node;

	/* set type and abort-specific fields */
	abort_io->io_type = EFCT_IO_TYPE_ABORT;
	abort_io->display_name = "tgt_abort";
	abort_io->io_to_abort = io;
	abort_io->send_abts = false;
	abort_io->abort_cb = cb;
	abort_io->abort_cb_arg = arg;

	/* now dispatch IO */
	rc = efct_scsi_io_dispatch_abort(abort_io, efct_target_abort_cb);
	if (rc)
		kref_put(&io->ref, io->release);
	return rc;
}

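/*
 * Called when the target server has finished with an IO: drop the
 * reference that keeps the IO busy.
 */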
void
efct_scsi_io_complete(struct efct_io *io)
{
	if (io->io_free) {
		efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
			      io->tag);
		return;
	}

	scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
	kref_put(&io->ref, io->release);
}