1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
12
13 #include <scsi/scsi_tcq.h>
14
15 static void qla25xx_set_que(srb_t *, struct rsp_que **);
16 /**
17  * qla2x00_get_cmd_direction() - Determine control flags data direction.
18  * @sp: SRB command to process
19  *
20  * Returns the proper CF_* direction based on the command's data direction.
21 */
22 static inline uint16_t
23 qla2x00_get_cmd_direction(srb_t *sp)
24 {
25 uint16_t cflags;
26 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
27 struct scsi_qla_host *vha = sp->fcport->vha;
28
29 cflags = 0;
30
31 /* Set transfer direction */
32 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
33 cflags = CF_WRITE;
34 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 cflags = CF_READ;
37 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38 }
39 return (cflags);
40 }
41
42 /**
43 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
44 * Continuation Type 0 IOCBs to allocate.
45 *
46  * @dsds: number of data segment descriptors needed
47 *
48 * Returns the number of IOCB entries needed to store @dsds.
49 */
50 uint16_t
51 qla2x00_calc_iocbs_32(uint16_t dsds)
52 {
53 uint16_t iocbs;
54
55 iocbs = 1;
56 if (dsds > 3) {
57 iocbs += (dsds - 3) / 7;
58 if ((dsds - 3) % 7)
59 iocbs++;
60 }
61 return (iocbs);
62 }
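/*
 * Worked example (editorial illustration, not in the original source):
 * a Command Type 2 IOCB holds up to 3 DSDs and each Continuation Type 0
 * IOCB holds up to 7 more, so for dsds = 12 this returns
 * 1 + (12 - 3) / 7 + 1 = 3 entries: the command IOCB carries 3 DSDs and
 * two continuation IOCBs carry 7 and 2 DSDs respectively.
 */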
63
64 /**
65 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
66 * Continuation Type 1 IOCBs to allocate.
67 *
68  * @dsds: number of data segment descriptors needed
69 *
70 * Returns the number of IOCB entries needed to store @dsds.
71 */
72 uint16_t
73 qla2x00_calc_iocbs_64(uint16_t dsds)
74 {
75 uint16_t iocbs;
76
77 iocbs = 1;
78 if (dsds > 2) {
79 iocbs += (dsds - 2) / 5;
80 if ((dsds - 2) % 5)
81 iocbs++;
82 }
83 return (iocbs);
84 }
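/*
 * Worked example (editorial illustration): a Command Type 3 IOCB holds up
 * to 2 DSDs and each Continuation Type 1 IOCB holds up to 5 more, so for
 * dsds = 12 this returns 1 + (12 - 2) / 5 = 3 entries with no remainder:
 * 2 DSDs in the command IOCB and 5 + 5 in two continuation IOCBs.
 */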
85
86 /**
87 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
88  * @vha: HA context
89 *
90 * Returns a pointer to the Continuation Type 0 IOCB packet.
91 */
92 static inline cont_entry_t *
93 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
94 {
95 cont_entry_t *cont_pkt;
96 struct req_que *req = vha->req;
97 /* Adjust ring index. */
98 req->ring_index++;
99 if (req->ring_index == req->length) {
100 req->ring_index = 0;
101 req->ring_ptr = req->ring;
102 } else {
103 req->ring_ptr++;
104 }
105
106 cont_pkt = (cont_entry_t *)req->ring_ptr;
107
108 /* Load packet defaults. */
109 *((uint32_t *)(&cont_pkt->entry_type)) =
110 __constant_cpu_to_le32(CONTINUE_TYPE);
111
112 return (cont_pkt);
113 }
114
115 /**
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
117  * @vha: HA context
118 *
119 * Returns a pointer to the continuation type 1 IOCB packet.
120 */
121 static inline cont_a64_entry_t *
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
123 {
124 cont_a64_entry_t *cont_pkt;
125
126 /* Adjust ring index. */
127 req->ring_index++;
128 if (req->ring_index == req->length) {
129 req->ring_index = 0;
130 req->ring_ptr = req->ring;
131 } else {
132 req->ring_ptr++;
133 }
134
135 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
136
137 /* Load packet defaults. */
138 *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
139 __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
140 __constant_cpu_to_le32(CONTINUE_A64_TYPE);
141
142 return (cont_pkt);
143 }
144
145 static inline int
146 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
147 {
148 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
149 uint8_t guard = scsi_host_get_guard(cmd->device->host);
150
151 	/* We always use DIF Bundling for best performance */
152 *fw_prot_opts = 0;
153
154 	/* Translate the SCSI protection operation to a firmware protection opcode */
155 switch (scsi_get_prot_op(cmd)) {
156 case SCSI_PROT_READ_STRIP:
157 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
158 break;
159 case SCSI_PROT_WRITE_INSERT:
160 *fw_prot_opts |= PO_MODE_DIF_INSERT;
161 break;
162 case SCSI_PROT_READ_INSERT:
163 *fw_prot_opts |= PO_MODE_DIF_INSERT;
164 break;
165 case SCSI_PROT_WRITE_STRIP:
166 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
167 break;
168 case SCSI_PROT_READ_PASS:
169 case SCSI_PROT_WRITE_PASS:
170 if (guard & SHOST_DIX_GUARD_IP)
171 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
172 else
173 *fw_prot_opts |= PO_MODE_DIF_PASS;
174 break;
175 default: /* Normal Request */
176 *fw_prot_opts |= PO_MODE_DIF_PASS;
177 break;
178 }
179
180 return scsi_prot_sg_count(cmd);
181 }
182
183 /*
184 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
185 * capable IOCB types.
186 *
187 * @sp: SRB command to process
188 * @cmd_pkt: Command type 2 IOCB
189 * @tot_dsds: Total number of segments to transfer
190 */
191 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192 uint16_t tot_dsds)
193 {
194 uint16_t avail_dsds;
195 uint32_t *cur_dsd;
196 scsi_qla_host_t *vha;
197 struct scsi_cmnd *cmd;
198 struct scatterlist *sg;
199 int i;
200
201 cmd = GET_CMD_SP(sp);
202
203 /* Update entry type to indicate Command Type 2 IOCB */
204 *((uint32_t *)(&cmd_pkt->entry_type)) =
205 __constant_cpu_to_le32(COMMAND_TYPE);
206
207 /* No data transfer */
208 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
209 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
210 return;
211 }
212
213 vha = sp->fcport->vha;
214 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
215
216 /* Three DSDs are available in the Command Type 2 IOCB */
217 avail_dsds = 3;
218 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
219
220 /* Load data segments */
221 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
222 cont_entry_t *cont_pkt;
223
224 /* Allocate additional continuation packets? */
225 if (avail_dsds == 0) {
226 /*
227 * Seven DSDs are available in the Continuation
228 * Type 0 IOCB.
229 */
230 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
231 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
232 avail_dsds = 7;
233 }
234
235 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
236 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
237 avail_dsds--;
238 }
239 }
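/*
 * Illustrative note (not in the original source): each 32-bit DSD written
 * above is an { address, length } pair of two 32-bit words. For example,
 * with tot_dsds = 10 the first 3 DSDs land in the command IOCB and the
 * remaining 7 fill a single Continuation Type 0 IOCB, which matches
 * qla2x00_calc_iocbs_32(10) == 2 request-queue entries.
 */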
240
241 /**
242 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
243 * capable IOCB types.
244 *
245 * @sp: SRB command to process
246 * @cmd_pkt: Command type 3 IOCB
247 * @tot_dsds: Total number of segments to transfer
248 */
249 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250 uint16_t tot_dsds)
251 {
252 uint16_t avail_dsds;
253 uint32_t *cur_dsd;
254 scsi_qla_host_t *vha;
255 struct scsi_cmnd *cmd;
256 struct scatterlist *sg;
257 int i;
258
259 cmd = GET_CMD_SP(sp);
260
261 /* Update entry type to indicate Command Type 3 IOCB */
262 *((uint32_t *)(&cmd_pkt->entry_type)) =
263 __constant_cpu_to_le32(COMMAND_A64_TYPE);
264
265 /* No data transfer */
266 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
267 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
268 return;
269 }
270
271 vha = sp->fcport->vha;
272 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
273
274 /* Two DSDs are available in the Command Type 3 IOCB */
275 avail_dsds = 2;
276 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
277
278 /* Load data segments */
279 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
280 dma_addr_t sle_dma;
281 cont_a64_entry_t *cont_pkt;
282
283 /* Allocate additional continuation packets? */
284 if (avail_dsds == 0) {
285 /*
286 * Five DSDs are available in the Continuation
287 * Type 1 IOCB.
288 */
289 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
290 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
291 avail_dsds = 5;
292 }
293
294 sle_dma = sg_dma_address(sg);
295 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
296 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
297 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
298 avail_dsds--;
299 }
300 }
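/*
 * Illustrative note (not in the original source): each 64-bit DSD is three
 * 32-bit words { addr_lo, addr_hi, length }. With tot_dsds = 12, 2 DSDs go
 * into the command IOCB and 5 + 5 into two Continuation Type 1 IOCBs,
 * matching qla2x00_calc_iocbs_64(12) == 3 request-queue entries.
 */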
301
302 /**
303 * qla2x00_start_scsi() - Send a SCSI command to the ISP
304 * @sp: command to send to the ISP
305 *
306 * Returns non-zero if a failure occurred, else zero.
307 */
308 int
309 qla2x00_start_scsi(srb_t *sp)
310 {
311 int ret, nseg;
312 unsigned long flags;
313 scsi_qla_host_t *vha;
314 struct scsi_cmnd *cmd;
315 uint32_t *clr_ptr;
316 uint32_t index;
317 uint32_t handle;
318 cmd_entry_t *cmd_pkt;
319 uint16_t cnt;
320 uint16_t req_cnt;
321 uint16_t tot_dsds;
322 struct device_reg_2xxx __iomem *reg;
323 struct qla_hw_data *ha;
324 struct req_que *req;
325 struct rsp_que *rsp;
326 char tag[2];
327
328 /* Setup device pointers. */
329 ret = 0;
330 vha = sp->fcport->vha;
331 ha = vha->hw;
332 reg = &ha->iobase->isp;
333 cmd = GET_CMD_SP(sp);
334 req = ha->req_q_map[0];
335 rsp = ha->rsp_q_map[0];
336 /* So we know we haven't pci_map'ed anything yet */
337 tot_dsds = 0;
338
339 /* Send marker if required */
340 if (vha->marker_needed != 0) {
341 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
342 QLA_SUCCESS) {
343 return (QLA_FUNCTION_FAILED);
344 }
345 vha->marker_needed = 0;
346 }
347
348 /* Acquire ring specific lock */
349 spin_lock_irqsave(&ha->hardware_lock, flags);
350
351 /* Check for room in outstanding command list. */
352 handle = req->current_outstanding_cmd;
353 for (index = 1; index < req->num_outstanding_cmds; index++) {
354 handle++;
355 if (handle == req->num_outstanding_cmds)
356 handle = 1;
357 if (!req->outstanding_cmds[handle])
358 break;
359 }
360 if (index == req->num_outstanding_cmds)
361 goto queuing_error;
362
363 /* Map the sg table so we have an accurate count of sg entries needed */
364 if (scsi_sg_count(cmd)) {
365 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
366 scsi_sg_count(cmd), cmd->sc_data_direction);
367 if (unlikely(!nseg))
368 goto queuing_error;
369 } else
370 nseg = 0;
371
372 tot_dsds = nseg;
373
374 /* Calculate the number of request entries needed. */
375 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
376 if (req->cnt < (req_cnt + 2)) {
377 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
378 if (req->ring_index < cnt)
379 req->cnt = cnt - req->ring_index;
380 else
381 req->cnt = req->length -
382 (req->ring_index - cnt);
383 /* If still no head room then bail out */
384 if (req->cnt < (req_cnt + 2))
385 goto queuing_error;
386 }
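/*
 * Illustrative numbers (editorial, hypothetical values): if req->length is
 * 128, req->ring_index is 120 and the out pointer just read back (cnt) is
 * 10, then ring_index >= cnt and the free space is 128 - (120 - 10) = 18
 * entries, so any request needing req_cnt + 2 <= 18 slots may proceed.
 */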
387
388 /* Build command packet */
389 req->current_outstanding_cmd = handle;
390 req->outstanding_cmds[handle] = sp;
391 sp->handle = handle;
392 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
393 req->cnt -= req_cnt;
394
395 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
396 cmd_pkt->handle = handle;
397 /* Zero out remaining portion of packet. */
398 clr_ptr = (uint32_t *)cmd_pkt + 2;
399 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
400 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
401
402 /* Set target ID and LUN number*/
403 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
404 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
405
406 /* Update tagged queuing modifier */
407 if (scsi_populate_tag_msg(cmd, tag)) {
408 switch (tag[0]) {
409 case HEAD_OF_QUEUE_TAG:
410 cmd_pkt->control_flags =
411 __constant_cpu_to_le16(CF_HEAD_TAG);
412 break;
413 case ORDERED_QUEUE_TAG:
414 cmd_pkt->control_flags =
415 __constant_cpu_to_le16(CF_ORDERED_TAG);
416 break;
417 default:
418 cmd_pkt->control_flags =
419 __constant_cpu_to_le16(CF_SIMPLE_TAG);
420 break;
421 }
422 }
423
424 /* Load SCSI command packet. */
425 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
426 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
427
428 /* Build IOCB segments */
429 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
430
431 /* Set total data segment count. */
432 cmd_pkt->entry_count = (uint8_t)req_cnt;
433 wmb();
434
435 /* Adjust ring index. */
436 req->ring_index++;
437 if (req->ring_index == req->length) {
438 req->ring_index = 0;
439 req->ring_ptr = req->ring;
440 } else
441 req->ring_ptr++;
442
443 sp->flags |= SRB_DMA_VALID;
444
445 /* Set chip new ring index. */
446 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
447 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
448
449 /* Manage unprocessed RIO/ZIO commands in response queue. */
450 if (vha->flags.process_response_queue &&
451 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
452 qla2x00_process_response_queue(rsp);
453
454 spin_unlock_irqrestore(&ha->hardware_lock, flags);
455 return (QLA_SUCCESS);
456
457 queuing_error:
458 if (tot_dsds)
459 scsi_dma_unmap(cmd);
460
461 spin_unlock_irqrestore(&ha->hardware_lock, flags);
462
463 return (QLA_FUNCTION_FAILED);
464 }
465
466 /**
467  * qla2x00_start_iocbs() - Advance the request ring and notify the ISP of new IOCBs.
468 */
469 void
470 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
471 {
472 struct qla_hw_data *ha = vha->hw;
473 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
474
475 if (IS_QLA82XX(ha)) {
476 qla82xx_start_iocbs(vha);
477 } else {
478 /* Adjust ring index. */
479 req->ring_index++;
480 if (req->ring_index == req->length) {
481 req->ring_index = 0;
482 req->ring_ptr = req->ring;
483 } else
484 req->ring_ptr++;
485
486 /* Set chip new ring index. */
487 if (ha->mqenable || IS_QLA83XX(ha)) {
488 WRT_REG_DWORD(req->req_q_in, req->ring_index);
489 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
490 } else if (IS_QLAFX00(ha)) {
491 			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
492 			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
493 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
494 } else if (IS_FWI2_CAPABLE(ha)) {
495 			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
496 			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
497 } else {
498 			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
499 req->ring_index);
500 			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
501 }
502 }
503 }
504
505 /**
506 * qla2x00_marker() - Send a marker IOCB to the firmware.
507  * @vha: HA context
508 * @loop_id: loop ID
509 * @lun: LUN
510 * @type: marker modifier
511 *
512 * Can be called from both normal and interrupt context.
513 *
514 * Returns non-zero if a failure occurred, else zero.
515 */
516 static int
517 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
518 struct rsp_que *rsp, uint16_t loop_id,
519 uint16_t lun, uint8_t type)
520 {
521 mrk_entry_t *mrk;
522 struct mrk_entry_24xx *mrk24 = NULL;
523 struct mrk_entry_fx00 *mrkfx = NULL;
524
525 struct qla_hw_data *ha = vha->hw;
526 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
527
528 req = ha->req_q_map[0];
529 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
530 if (mrk == NULL) {
531 ql_log(ql_log_warn, base_vha, 0x3026,
532 "Failed to allocate Marker IOCB.\n");
533
534 return (QLA_FUNCTION_FAILED);
535 }
536
537 mrk->entry_type = MARKER_TYPE;
538 mrk->modifier = type;
539 if (type != MK_SYNC_ALL) {
540 if (IS_QLAFX00(ha)) {
541 mrkfx = (struct mrk_entry_fx00 *) mrk;
542 mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
543 mrkfx->handle_hi = 0;
544 mrkfx->tgt_id = cpu_to_le16(loop_id);
545 mrkfx->lun[1] = LSB(lun);
546 mrkfx->lun[2] = MSB(lun);
547 host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
548 } else if (IS_FWI2_CAPABLE(ha)) {
549 mrk24 = (struct mrk_entry_24xx *) mrk;
550 mrk24->nport_handle = cpu_to_le16(loop_id);
551 mrk24->lun[1] = LSB(lun);
552 mrk24->lun[2] = MSB(lun);
553 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
554 mrk24->vp_index = vha->vp_idx;
555 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
556 } else {
557 SET_TARGET_ID(ha, mrk->target, loop_id);
558 mrk->lun = cpu_to_le16(lun);
559 }
560 }
561 wmb();
562
563 qla2x00_start_iocbs(vha, req);
564
565 return (QLA_SUCCESS);
566 }
567
568 int
569 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
570 struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
571 uint8_t type)
572 {
573 int ret;
574 unsigned long flags = 0;
575
576 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
577 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
578 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
579
580 return (ret);
581 }
582
583 /*
584 * qla2x00_issue_marker
585 *
586  * Issue a marker IOCB.
587  * Caller CAN have the hardware lock held, as specified by the ha_locked parameter.
588  * Might release it, then reacquire.
589 */
590 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
591 {
592 if (ha_locked) {
593 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
594 MK_SYNC_ALL) != QLA_SUCCESS)
595 return QLA_FUNCTION_FAILED;
596 } else {
597 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
598 MK_SYNC_ALL) != QLA_SUCCESS)
599 return QLA_FUNCTION_FAILED;
600 }
601 vha->marker_needed = 0;
602
603 return QLA_SUCCESS;
604 }
605
606 static inline int
607 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
608 uint16_t tot_dsds)
609 {
610 uint32_t *cur_dsd = NULL;
611 scsi_qla_host_t *vha;
612 struct qla_hw_data *ha;
613 struct scsi_cmnd *cmd;
614 struct scatterlist *cur_seg;
615 uint32_t *dsd_seg;
616 void *next_dsd;
617 uint8_t avail_dsds;
618 uint8_t first_iocb = 1;
619 uint32_t dsd_list_len;
620 struct dsd_dma *dsd_ptr;
621 struct ct6_dsd *ctx;
622
623 cmd = GET_CMD_SP(sp);
624
625 	/* Update entry type to indicate Command Type 6 IOCB */
626 *((uint32_t *)(&cmd_pkt->entry_type)) =
627 __constant_cpu_to_le32(COMMAND_TYPE_6);
628
629 /* No data transfer */
630 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
631 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
632 return 0;
633 }
634
635 vha = sp->fcport->vha;
636 ha = vha->hw;
637
638 /* Set transfer direction */
639 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
640 cmd_pkt->control_flags =
641 __constant_cpu_to_le16(CF_WRITE_DATA);
642 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
643 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
644 cmd_pkt->control_flags =
645 __constant_cpu_to_le16(CF_READ_DATA);
646 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
647 }
648
649 cur_seg = scsi_sglist(cmd);
650 ctx = GET_CMD_CTX_SP(sp);
651
652 while (tot_dsds) {
653 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
654 QLA_DSDS_PER_IOCB : tot_dsds;
655 tot_dsds -= avail_dsds;
656 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
657
658 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
659 struct dsd_dma, list);
660 next_dsd = dsd_ptr->dsd_addr;
661 list_del(&dsd_ptr->list);
662 ha->gbl_dsd_avail--;
663 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
664 ctx->dsd_use_cnt++;
665 ha->gbl_dsd_inuse++;
666
667 if (first_iocb) {
668 first_iocb = 0;
669 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
670 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
671 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
672 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
673 } else {
674 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
675 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
676 *cur_dsd++ = cpu_to_le32(dsd_list_len);
677 }
678 cur_dsd = (uint32_t *)next_dsd;
679 while (avail_dsds) {
680 dma_addr_t sle_dma;
681
682 sle_dma = sg_dma_address(cur_seg);
683 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
684 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
685 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
686 cur_seg = sg_next(cur_seg);
687 avail_dsds--;
688 }
689 }
690
691 /* Null termination */
692 *cur_dsd++ = 0;
693 *cur_dsd++ = 0;
694 *cur_dsd++ = 0;
695 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
696 return 0;
697 }
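/*
 * Editorial note on the sizing above (assumption based on this code and the
 * literal 12 used by the sg-walk helpers below, i.e. QLA_DSD_SIZE == 12):
 * each DSD list entry is three 32-bit words { addr_lo, addr_hi, length }.
 * dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE reserves one extra entry
 * per list, which ends up holding either the pointer to the next chained
 * list or the three-word null terminator written at the end.
 */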
698
699 /*
700  * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
701  * for Command Type 6.
702  *
703  * @dsds: number of data segment descriptors needed
704  *
705  * Returns the number of DSD lists needed to store @dsds.
706 */
707 inline uint16_t
708 qla24xx_calc_dsd_lists(uint16_t dsds)
709 {
710 uint16_t dsd_lists = 0;
711
712 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
713 if (dsds % QLA_DSDS_PER_IOCB)
714 dsd_lists++;
715 return dsd_lists;
716 }
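/*
 * Worked example (editorial; assumes QLA_DSDS_PER_IOCB is 37 as defined
 * elsewhere in the driver): for dsds = 80 this returns 80 / 37 = 2 plus one
 * more for the remainder of 6, i.e. 3 DSD lists.
 */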
717
718
719 /**
720 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
721 * IOCB types.
722 *
723 * @sp: SRB command to process
724  * @cmd_pkt: Command Type 7 IOCB
725 * @tot_dsds: Total number of segments to transfer
726 */
727 inline void
728 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
729 uint16_t tot_dsds)
730 {
731 uint16_t avail_dsds;
732 uint32_t *cur_dsd;
733 scsi_qla_host_t *vha;
734 struct scsi_cmnd *cmd;
735 struct scatterlist *sg;
736 int i;
737 struct req_que *req;
738
739 cmd = GET_CMD_SP(sp);
740
741 	/* Update entry type to indicate Command Type 7 IOCB */
742 *((uint32_t *)(&cmd_pkt->entry_type)) =
743 __constant_cpu_to_le32(COMMAND_TYPE_7);
744
745 /* No data transfer */
746 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
747 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
748 return;
749 }
750
751 vha = sp->fcport->vha;
752 req = vha->req;
753
754 /* Set transfer direction */
755 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
756 cmd_pkt->task_mgmt_flags =
757 __constant_cpu_to_le16(TMF_WRITE_DATA);
758 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
759 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
760 cmd_pkt->task_mgmt_flags =
761 __constant_cpu_to_le16(TMF_READ_DATA);
762 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
763 }
764
765 	/* One DSD is available in the Command Type 7 IOCB */
766 avail_dsds = 1;
767 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
768
769 /* Load data segments */
770
771 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
772 dma_addr_t sle_dma;
773 cont_a64_entry_t *cont_pkt;
774
775 /* Allocate additional continuation packets? */
776 if (avail_dsds == 0) {
777 /*
778 * Five DSDs are available in the Continuation
779 * Type 1 IOCB.
780 */
781 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
782 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
783 avail_dsds = 5;
784 }
785
786 sle_dma = sg_dma_address(sg);
787 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
788 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
789 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
790 avail_dsds--;
791 }
792 }
793
794 struct fw_dif_context {
795 uint32_t ref_tag;
796 uint16_t app_tag;
797 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
798 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
799 };
800
801 /*
802  * qla24xx_set_t10dif_tags() - Extract ref and app tags from the SCSI command.
803 *
804 */
805 static inline void
806 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
807 unsigned int protcnt)
808 {
809 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
810
811 switch (scsi_get_prot_type(cmd)) {
812 case SCSI_PROT_DIF_TYPE0:
813 /*
814 * No check for ql2xenablehba_err_chk, as it would be an
815 * I/O error if hba tag generation is not done.
816 */
817 pkt->ref_tag = cpu_to_le32((uint32_t)
818 (0xffffffff & scsi_get_lba(cmd)));
819
820 if (!qla2x00_hba_err_chk_enabled(sp))
821 break;
822
823 pkt->ref_tag_mask[0] = 0xff;
824 pkt->ref_tag_mask[1] = 0xff;
825 pkt->ref_tag_mask[2] = 0xff;
826 pkt->ref_tag_mask[3] = 0xff;
827 break;
828
829 /*
830 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
831 * match LBA in CDB + N
832 */
833 case SCSI_PROT_DIF_TYPE2:
834 pkt->app_tag = __constant_cpu_to_le16(0);
835 pkt->app_tag_mask[0] = 0x0;
836 pkt->app_tag_mask[1] = 0x0;
837
838 pkt->ref_tag = cpu_to_le32((uint32_t)
839 (0xffffffff & scsi_get_lba(cmd)));
840
841 if (!qla2x00_hba_err_chk_enabled(sp))
842 break;
843
844 /* enable ALL bytes of the ref tag */
845 pkt->ref_tag_mask[0] = 0xff;
846 pkt->ref_tag_mask[1] = 0xff;
847 pkt->ref_tag_mask[2] = 0xff;
848 pkt->ref_tag_mask[3] = 0xff;
849 break;
850
851 /* For Type 3 protection: 16 bit GUARD only */
852 case SCSI_PROT_DIF_TYPE3:
853 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
854 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
855 0x00;
856 break;
857
858 /*
859 	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
860 * 16 bit app tag.
861 */
862 case SCSI_PROT_DIF_TYPE1:
863 pkt->ref_tag = cpu_to_le32((uint32_t)
864 (0xffffffff & scsi_get_lba(cmd)));
865 pkt->app_tag = __constant_cpu_to_le16(0);
866 pkt->app_tag_mask[0] = 0x0;
867 pkt->app_tag_mask[1] = 0x0;
868
869 if (!qla2x00_hba_err_chk_enabled(sp))
870 break;
871
872 /* enable ALL bytes of the ref tag */
873 pkt->ref_tag_mask[0] = 0xff;
874 pkt->ref_tag_mask[1] = 0xff;
875 pkt->ref_tag_mask[2] = 0xff;
876 pkt->ref_tag_mask[3] = 0xff;
877 break;
878 }
879 }
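/*
 * Worked example (editorial illustration): for a Type 1 protected write at
 * LBA 0x12345678 with HBA error checking enabled, the code above sets
 * ref_tag = 0x12345678 (the low 32 bits of the LBA), app_tag = 0 with its
 * mask cleared, and all four ref_tag_mask bytes to 0xff so the firmware
 * checks/replaces the entire reference tag.
 */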
880
881 struct qla2_sgx {
882 dma_addr_t dma_addr; /* OUT */
883 uint32_t dma_len; /* OUT */
884
885 uint32_t tot_bytes; /* IN */
886 struct scatterlist *cur_sg; /* IN */
887
888 	/* for bookkeeping, bzero on initial invocation */
889 uint32_t bytes_consumed;
890 uint32_t num_bytes;
891 uint32_t tot_partial;
892
893 /* for debugging */
894 uint32_t num_sg;
895 srb_t *sp;
896 };
897
898 static int
899 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
900 uint32_t *partial)
901 {
902 struct scatterlist *sg;
903 uint32_t cumulative_partial, sg_len;
904 dma_addr_t sg_dma_addr;
905
906 if (sgx->num_bytes == sgx->tot_bytes)
907 return 0;
908
909 sg = sgx->cur_sg;
910 cumulative_partial = sgx->tot_partial;
911
912 sg_dma_addr = sg_dma_address(sg);
913 sg_len = sg_dma_len(sg);
914
915 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
916
917 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
918 sgx->dma_len = (blk_sz - cumulative_partial);
919 sgx->tot_partial = 0;
920 sgx->num_bytes += blk_sz;
921 *partial = 0;
922 } else {
923 sgx->dma_len = sg_len - sgx->bytes_consumed;
924 sgx->tot_partial += sgx->dma_len;
925 *partial = 1;
926 }
927
928 sgx->bytes_consumed += sgx->dma_len;
929
930 if (sg_len == sgx->bytes_consumed) {
931 sg = sg_next(sg);
932 sgx->num_sg++;
933 sgx->cur_sg = sg;
934 sgx->bytes_consumed = 0;
935 }
936
937 return 1;
938 }
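/*
 * Worked example (editorial illustration): with blk_sz = 512 and two
 * 300-byte scatterlist entries, the first call returns dma_len = 300 with
 * *partial = 1, the second returns dma_len = 212 (completing one 512-byte
 * protection interval) with *partial = 0, and the third returns the
 * remaining 88 bytes with *partial = 1 again.
 */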
939
940 static int
941 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
942 uint32_t *dsd, uint16_t tot_dsds)
943 {
944 void *next_dsd;
945 uint8_t avail_dsds = 0;
946 uint32_t dsd_list_len;
947 struct dsd_dma *dsd_ptr;
948 struct scatterlist *sg_prot;
949 uint32_t *cur_dsd = dsd;
950 uint16_t used_dsds = tot_dsds;
951
952 uint32_t prot_int;
953 uint32_t partial;
954 struct qla2_sgx sgx;
955 dma_addr_t sle_dma;
956 uint32_t sle_dma_len, tot_prot_dma_len = 0;
957 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
958
959 prot_int = cmd->device->sector_size;
960
961 memset(&sgx, 0, sizeof(struct qla2_sgx));
962 sgx.tot_bytes = scsi_bufflen(cmd);
963 sgx.cur_sg = scsi_sglist(cmd);
964 sgx.sp = sp;
965
966 sg_prot = scsi_prot_sglist(cmd);
967
968 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
969
970 sle_dma = sgx.dma_addr;
971 sle_dma_len = sgx.dma_len;
972 alloc_and_fill:
973 /* Allocate additional continuation packets? */
974 if (avail_dsds == 0) {
975 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
976 QLA_DSDS_PER_IOCB : used_dsds;
977 dsd_list_len = (avail_dsds + 1) * 12;
978 used_dsds -= avail_dsds;
979
980 /* allocate tracking DS */
981 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
982 if (!dsd_ptr)
983 return 1;
984
985 /* allocate new list */
986 dsd_ptr->dsd_addr = next_dsd =
987 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
988 &dsd_ptr->dsd_list_dma);
989
990 if (!next_dsd) {
991 /*
992 * Need to cleanup only this dsd_ptr, rest
993 * will be done by sp_free_dma()
994 */
995 kfree(dsd_ptr);
996 return 1;
997 }
998
999 list_add_tail(&dsd_ptr->list,
1000 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1001
1002 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1003
1004 /* add new list to cmd iocb or last list */
1005 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1006 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1007 *cur_dsd++ = dsd_list_len;
1008 cur_dsd = (uint32_t *)next_dsd;
1009 }
1010 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1011 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1012 *cur_dsd++ = cpu_to_le32(sle_dma_len);
1013 avail_dsds--;
1014
1015 if (partial == 0) {
1016 /* Got a full protection interval */
1017 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
1018 sle_dma_len = 8;
1019
1020 tot_prot_dma_len += sle_dma_len;
1021 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
1022 tot_prot_dma_len = 0;
1023 sg_prot = sg_next(sg_prot);
1024 }
1025
1026 partial = 1; /* So as to not re-enter this block */
1027 goto alloc_and_fill;
1028 }
1029 }
1030 /* Null termination */
1031 *cur_dsd++ = 0;
1032 *cur_dsd++ = 0;
1033 *cur_dsd++ = 0;
1034 return 0;
1035 }
1036
1037 static int
1038 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1039 uint16_t tot_dsds)
1040 {
1041 void *next_dsd;
1042 uint8_t avail_dsds = 0;
1043 uint32_t dsd_list_len;
1044 struct dsd_dma *dsd_ptr;
1045 struct scatterlist *sg;
1046 uint32_t *cur_dsd = dsd;
1047 int i;
1048 uint16_t used_dsds = tot_dsds;
1049 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1050
1051 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1052 dma_addr_t sle_dma;
1053
1054 /* Allocate additional continuation packets? */
1055 if (avail_dsds == 0) {
1056 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1057 QLA_DSDS_PER_IOCB : used_dsds;
1058 dsd_list_len = (avail_dsds + 1) * 12;
1059 used_dsds -= avail_dsds;
1060
1061 /* allocate tracking DS */
1062 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1063 if (!dsd_ptr)
1064 return 1;
1065
1066 /* allocate new list */
1067 dsd_ptr->dsd_addr = next_dsd =
1068 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1069 &dsd_ptr->dsd_list_dma);
1070
1071 if (!next_dsd) {
1072 /*
1073 * Need to cleanup only this dsd_ptr, rest
1074 * will be done by sp_free_dma()
1075 */
1076 kfree(dsd_ptr);
1077 return 1;
1078 }
1079
1080 list_add_tail(&dsd_ptr->list,
1081 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1082
1083 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1084
1085 /* add new list to cmd iocb or last list */
1086 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1087 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1088 *cur_dsd++ = dsd_list_len;
1089 cur_dsd = (uint32_t *)next_dsd;
1090 }
1091 sle_dma = sg_dma_address(sg);
1092
1093 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1094 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1095 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1096 avail_dsds--;
1097
1098 }
1099 /* Null termination */
1100 *cur_dsd++ = 0;
1101 *cur_dsd++ = 0;
1102 *cur_dsd++ = 0;
1103 return 0;
1104 }
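/*
 * Editorial note on the chaining above: whenever a list fills up, the three
 * words at the current cursor - in the command IOCB for the first list, or
 * in the previous list thereafter - receive the DMA address and length of a
 * freshly allocated list, so the firmware can follow the chain; the final
 * list is closed with the three-word null terminator written above.
 */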
1105
1106 static int
1107 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1108 uint32_t *dsd,
1109 uint16_t tot_dsds)
1110 {
1111 void *next_dsd;
1112 uint8_t avail_dsds = 0;
1113 uint32_t dsd_list_len;
1114 struct dsd_dma *dsd_ptr;
1115 struct scatterlist *sg;
1116 int i;
1117 struct scsi_cmnd *cmd;
1118 uint32_t *cur_dsd = dsd;
1119 uint16_t used_dsds = tot_dsds;
1120
1121 cmd = GET_CMD_SP(sp);
1122 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
1123 dma_addr_t sle_dma;
1124
1125 /* Allocate additional continuation packets? */
1126 if (avail_dsds == 0) {
1127 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1128 QLA_DSDS_PER_IOCB : used_dsds;
1129 dsd_list_len = (avail_dsds + 1) * 12;
1130 used_dsds -= avail_dsds;
1131
1132 /* allocate tracking DS */
1133 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1134 if (!dsd_ptr)
1135 return 1;
1136
1137 /* allocate new list */
1138 dsd_ptr->dsd_addr = next_dsd =
1139 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1140 &dsd_ptr->dsd_list_dma);
1141
1142 if (!next_dsd) {
1143 /*
1144 * Need to cleanup only this dsd_ptr, rest
1145 * will be done by sp_free_dma()
1146 */
1147 kfree(dsd_ptr);
1148 return 1;
1149 }
1150
1151 list_add_tail(&dsd_ptr->list,
1152 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1153
1154 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1155
1156 /* add new list to cmd iocb or last list */
1157 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1158 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1159 *cur_dsd++ = dsd_list_len;
1160 cur_dsd = (uint32_t *)next_dsd;
1161 }
1162 sle_dma = sg_dma_address(sg);
1163
1164 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1165 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1166 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1167
1168 avail_dsds--;
1169 }
1170 /* Null termination */
1171 *cur_dsd++ = 0;
1172 *cur_dsd++ = 0;
1173 *cur_dsd++ = 0;
1174 return 0;
1175 }
1176
1177 /**
1178  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1179  * Type CRC_2 IOCB types.
1180  *
1181  * @sp: SRB command to process
1182  * @cmd_pkt: Command Type CRC_2 IOCB
1183 * @tot_dsds: Total number of segments to transfer
1184 */
1185 static inline int
1186 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1187 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1188 {
1189 uint32_t *cur_dsd, *fcp_dl;
1190 scsi_qla_host_t *vha;
1191 struct scsi_cmnd *cmd;
1192 struct scatterlist *cur_seg;
1193 int sgc;
1194 uint32_t total_bytes = 0;
1195 uint32_t data_bytes;
1196 uint32_t dif_bytes;
1197 uint8_t bundling = 1;
1198 uint16_t blk_size;
1199 uint8_t *clr_ptr;
1200 struct crc_context *crc_ctx_pkt = NULL;
1201 struct qla_hw_data *ha;
1202 uint8_t additional_fcpcdb_len;
1203 uint16_t fcp_cmnd_len;
1204 struct fcp_cmnd *fcp_cmnd;
1205 dma_addr_t crc_ctx_dma;
1206 char tag[2];
1207
1208 cmd = GET_CMD_SP(sp);
1209
1210 sgc = 0;
1211 /* Update entry type to indicate Command Type CRC_2 IOCB */
1212 *((uint32_t *)(&cmd_pkt->entry_type)) =
1213 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1214
1215 vha = sp->fcport->vha;
1216 ha = vha->hw;
1217
1218 /* No data transfer */
1219 data_bytes = scsi_bufflen(cmd);
1220 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1221 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1222 return QLA_SUCCESS;
1223 }
1224
1225 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1226
1227 /* Set transfer direction */
1228 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1229 cmd_pkt->control_flags =
1230 __constant_cpu_to_le16(CF_WRITE_DATA);
1231 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1232 cmd_pkt->control_flags =
1233 __constant_cpu_to_le16(CF_READ_DATA);
1234 }
1235
1236 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1237 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1238 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1239 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1240 bundling = 0;
1241
1242 /* Allocate CRC context from global pool */
1243 crc_ctx_pkt = sp->u.scmd.ctx =
1244 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1245
1246 if (!crc_ctx_pkt)
1247 goto crc_queuing_error;
1248
1249 /* Zero out CTX area. */
1250 clr_ptr = (uint8_t *)crc_ctx_pkt;
1251 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1252
1253 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1254
1255 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1256
1257 /* Set handle */
1258 crc_ctx_pkt->handle = cmd_pkt->handle;
1259
1260 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1261
1262 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1263 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1264
1265 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1266 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1267 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1268
1269 /* Determine SCSI command length -- align to 4 byte boundary */
1270 if (cmd->cmd_len > 16) {
1271 additional_fcpcdb_len = cmd->cmd_len - 16;
1272 if ((cmd->cmd_len % 4) != 0) {
1273 /* SCSI cmd > 16 bytes must be multiple of 4 */
1274 goto crc_queuing_error;
1275 }
1276 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1277 } else {
1278 additional_fcpcdb_len = 0;
1279 fcp_cmnd_len = 12 + 16 + 4;
1280 }
1281
1282 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1283
1284 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1285 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1286 fcp_cmnd->additional_cdb_len |= 1;
1287 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1288 fcp_cmnd->additional_cdb_len |= 2;
1289
1290 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1291 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1292 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1293 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1294 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1295 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1296 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1297 fcp_cmnd->task_management = 0;
1298
1299 /*
1300 * Update tagged queuing modifier if using command tag queuing
1301 */
1302 if (scsi_populate_tag_msg(cmd, tag)) {
1303 switch (tag[0]) {
1304 case HEAD_OF_QUEUE_TAG:
1305 fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1306 break;
1307 case ORDERED_QUEUE_TAG:
1308 fcp_cmnd->task_attribute = TSK_ORDERED;
1309 break;
1310 default:
1311 fcp_cmnd->task_attribute = 0;
1312 break;
1313 }
1314 } else {
1315 fcp_cmnd->task_attribute = 0;
1316 }
1317
1318 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1319
1320 	/* Compute dif len and adjust data len to include protection */
1321 dif_bytes = 0;
1322 blk_size = cmd->device->sector_size;
1323 dif_bytes = (data_bytes / blk_size) * 8;
1324
1325 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1326 case SCSI_PROT_READ_INSERT:
1327 case SCSI_PROT_WRITE_STRIP:
1328 total_bytes = data_bytes;
1329 data_bytes += dif_bytes;
1330 break;
1331
1332 case SCSI_PROT_READ_STRIP:
1333 case SCSI_PROT_WRITE_INSERT:
1334 case SCSI_PROT_READ_PASS:
1335 case SCSI_PROT_WRITE_PASS:
1336 total_bytes = data_bytes + dif_bytes;
1337 break;
1338 default:
1339 BUG();
1340 }
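/*
 * Worked example (editorial illustration): with sector_size = 512 and
 * data_bytes = 4096, dif_bytes = (4096 / 512) * 8 = 64. For WRITE_PASS the
 * wire count total_bytes becomes 4096 + 64 = 4160, whereas for WRITE_STRIP
 * total_bytes stays 4096 and data_bytes is raised to 4160 to cover the
 * local DMA of the protection data.
 */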
1341
1342 if (!qla2x00_hba_err_chk_enabled(sp))
1343 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1344 /* HBA error checking enabled */
1345 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1346 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1347 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1348 SCSI_PROT_DIF_TYPE2))
1349 fw_prot_opts |= BIT_10;
1350 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1351 SCSI_PROT_DIF_TYPE3)
1352 fw_prot_opts |= BIT_11;
1353 }
1354
1355 if (!bundling) {
1356 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1357 } else {
1358 /*
1359 		 * Configure bundling if the protection data has to be
1360 		 * fetched with interleaving (separate) PCI accesses.
1361 */
1362 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1363 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1364 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1365 tot_prot_dsds);
1366 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1367 }
1368
1369 /* Finish the common fields of CRC pkt */
1370 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1371 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1372 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1373 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1374 /* Fibre channel byte count */
1375 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1376 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1377 additional_fcpcdb_len);
1378 *fcp_dl = htonl(total_bytes);
1379
1380 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1381 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1382 return QLA_SUCCESS;
1383 }
1384 	/* Walk the data segments */
1385
1386 cmd_pkt->control_flags |=
1387 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1388
1389 if (!bundling && tot_prot_dsds) {
1390 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1391 cur_dsd, tot_dsds))
1392 goto crc_queuing_error;
1393 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1394 (tot_dsds - tot_prot_dsds)))
1395 goto crc_queuing_error;
1396
1397 if (bundling && tot_prot_dsds) {
1398 		/* Walk the DIF segments */
1399 cur_seg = scsi_prot_sglist(cmd);
1400 cmd_pkt->control_flags |=
1401 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1402 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1403 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1404 tot_prot_dsds))
1405 goto crc_queuing_error;
1406 }
1407 return QLA_SUCCESS;
1408
1409 crc_queuing_error:
1410 /* Cleanup will be performed by the caller */
1411
1412 return QLA_FUNCTION_FAILED;
1413 }
1414
1415 /**
1416 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1417 * @sp: command to send to the ISP
1418 *
1419 * Returns non-zero if a failure occurred, else zero.
1420 */
1421 int
1422 qla24xx_start_scsi(srb_t *sp)
1423 {
1424 int ret, nseg;
1425 unsigned long flags;
1426 uint32_t *clr_ptr;
1427 uint32_t index;
1428 uint32_t handle;
1429 struct cmd_type_7 *cmd_pkt;
1430 uint16_t cnt;
1431 uint16_t req_cnt;
1432 uint16_t tot_dsds;
1433 struct req_que *req = NULL;
1434 struct rsp_que *rsp = NULL;
1435 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1436 struct scsi_qla_host *vha = sp->fcport->vha;
1437 struct qla_hw_data *ha = vha->hw;
1438 char tag[2];
1439
1440 /* Setup device pointers. */
1441 ret = 0;
1442
1443 qla25xx_set_que(sp, &rsp);
1444 req = vha->req;
1445
1446 /* So we know we haven't pci_map'ed anything yet */
1447 tot_dsds = 0;
1448
1449 /* Send marker if required */
1450 if (vha->marker_needed != 0) {
1451 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1452 QLA_SUCCESS)
1453 return QLA_FUNCTION_FAILED;
1454 vha->marker_needed = 0;
1455 }
1456
1457 /* Acquire ring specific lock */
1458 spin_lock_irqsave(&ha->hardware_lock, flags);
1459
1460 /* Check for room in outstanding command list. */
1461 handle = req->current_outstanding_cmd;
1462 for (index = 1; index < req->num_outstanding_cmds; index++) {
1463 handle++;
1464 if (handle == req->num_outstanding_cmds)
1465 handle = 1;
1466 if (!req->outstanding_cmds[handle])
1467 break;
1468 }
1469 if (index == req->num_outstanding_cmds)
1470 goto queuing_error;
1471
1472 /* Map the sg table so we have an accurate count of sg entries needed */
1473 if (scsi_sg_count(cmd)) {
1474 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1475 scsi_sg_count(cmd), cmd->sc_data_direction);
1476 if (unlikely(!nseg))
1477 goto queuing_error;
1478 } else
1479 nseg = 0;
1480
1481 tot_dsds = nseg;
1482 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1483 if (req->cnt < (req_cnt + 2)) {
1484 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1485
1486 if (req->ring_index < cnt)
1487 req->cnt = cnt - req->ring_index;
1488 else
1489 req->cnt = req->length -
1490 (req->ring_index - cnt);
1491 if (req->cnt < (req_cnt + 2))
1492 goto queuing_error;
1493 }
1494
1495 /* Build command packet. */
1496 req->current_outstanding_cmd = handle;
1497 req->outstanding_cmds[handle] = sp;
1498 sp->handle = handle;
1499 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1500 req->cnt -= req_cnt;
1501
1502 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1503 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1504
1505 /* Zero out remaining portion of packet. */
1506 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1507 clr_ptr = (uint32_t *)cmd_pkt + 2;
1508 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1509 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1510
1511 /* Set NPORT-ID and LUN number*/
1512 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1513 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1514 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1515 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1516 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1517
1518 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1519 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1520
1521 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1522 if (scsi_populate_tag_msg(cmd, tag)) {
1523 switch (tag[0]) {
1524 case HEAD_OF_QUEUE_TAG:
1525 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1526 break;
1527 case ORDERED_QUEUE_TAG:
1528 cmd_pkt->task = TSK_ORDERED;
1529 break;
1530 }
1531 }
1532
1533 /* Load SCSI command packet. */
1534 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1535 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1536
1537 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1538
1539 /* Build IOCB segments */
1540 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1541
1542 /* Set total data segment count. */
1543 cmd_pkt->entry_count = (uint8_t)req_cnt;
1544 /* Specify response queue number where completion should happen */
1545 cmd_pkt->entry_status = (uint8_t) rsp->id;
1546 wmb();
1547 /* Adjust ring index. */
1548 req->ring_index++;
1549 if (req->ring_index == req->length) {
1550 req->ring_index = 0;
1551 req->ring_ptr = req->ring;
1552 } else
1553 req->ring_ptr++;
1554
1555 sp->flags |= SRB_DMA_VALID;
1556
1557 /* Set chip new ring index. */
1558 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1559 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1560
1561 /* Manage unprocessed RIO/ZIO commands in response queue. */
1562 if (vha->flags.process_response_queue &&
1563 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1564 qla24xx_process_response_queue(vha, rsp);
1565
1566 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1567 return QLA_SUCCESS;
1568
1569 queuing_error:
1570 if (tot_dsds)
1571 scsi_dma_unmap(cmd);
1572
1573 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1574
1575 return QLA_FUNCTION_FAILED;
1576 }
1577
1578 /**
1579 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1580 * @sp: command to send to the ISP
1581 *
1582 * Returns non-zero if a failure occurred, else zero.
1583 */
1584 int
1585 qla24xx_dif_start_scsi(srb_t *sp)
1586 {
1587 int nseg;
1588 unsigned long flags;
1589 uint32_t *clr_ptr;
1590 uint32_t index;
1591 uint32_t handle;
1592 uint16_t cnt;
1593 uint16_t req_cnt = 0;
1594 uint16_t tot_dsds;
1595 uint16_t tot_prot_dsds;
1596 uint16_t fw_prot_opts = 0;
1597 struct req_que *req = NULL;
1598 struct rsp_que *rsp = NULL;
1599 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1600 struct scsi_qla_host *vha = sp->fcport->vha;
1601 struct qla_hw_data *ha = vha->hw;
1602 struct cmd_type_crc_2 *cmd_pkt;
1603 uint32_t status = 0;
1604
1605 #define QDSS_GOT_Q_SPACE BIT_0
1606
1607 	/* Only process protection I/O or CDBs longer than 16 bytes in this routine */
1608 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1609 if (cmd->cmd_len <= 16)
1610 return qla24xx_start_scsi(sp);
1611 }
1612
1613 /* Setup device pointers. */
1614
1615 qla25xx_set_que(sp, &rsp);
1616 req = vha->req;
1617
1618 /* So we know we haven't pci_map'ed anything yet */
1619 tot_dsds = 0;
1620
1621 /* Send marker if required */
1622 if (vha->marker_needed != 0) {
1623 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1624 QLA_SUCCESS)
1625 return QLA_FUNCTION_FAILED;
1626 vha->marker_needed = 0;
1627 }
1628
1629 /* Acquire ring specific lock */
1630 spin_lock_irqsave(&ha->hardware_lock, flags);
1631
1632 /* Check for room in outstanding command list. */
1633 handle = req->current_outstanding_cmd;
1634 for (index = 1; index < req->num_outstanding_cmds; index++) {
1635 handle++;
1636 if (handle == req->num_outstanding_cmds)
1637 handle = 1;
1638 if (!req->outstanding_cmds[handle])
1639 break;
1640 }
1641
1642 if (index == req->num_outstanding_cmds)
1643 goto queuing_error;
1644
1645 /* Compute number of required data segments */
1646 /* Map the sg table so we have an accurate count of sg entries needed */
1647 if (scsi_sg_count(cmd)) {
1648 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1649 scsi_sg_count(cmd), cmd->sc_data_direction);
1650 if (unlikely(!nseg))
1651 goto queuing_error;
1652 else
1653 sp->flags |= SRB_DMA_VALID;
1654
1655 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1656 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1657 struct qla2_sgx sgx;
1658 uint32_t partial;
1659
1660 memset(&sgx, 0, sizeof(struct qla2_sgx));
1661 sgx.tot_bytes = scsi_bufflen(cmd);
1662 sgx.cur_sg = scsi_sglist(cmd);
1663 sgx.sp = sp;
1664
1665 nseg = 0;
1666 while (qla24xx_get_one_block_sg(
1667 cmd->device->sector_size, &sgx, &partial))
1668 nseg++;
1669 }
1670 } else
1671 nseg = 0;
1672
1673 /* number of required data segments */
1674 tot_dsds = nseg;
1675
1676 /* Compute number of required protection segments */
1677 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1678 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1679 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1680 if (unlikely(!nseg))
1681 goto queuing_error;
1682 else
1683 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1684
1685 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1686 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1687 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1688 }
1689 } else {
1690 nseg = 0;
1691 }
1692
1693 req_cnt = 1;
1694 /* Total Data and protection sg segment(s) */
1695 tot_prot_dsds = nseg;
1696 tot_dsds += nseg;
1697 if (req->cnt < (req_cnt + 2)) {
1698 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1699
1700 if (req->ring_index < cnt)
1701 req->cnt = cnt - req->ring_index;
1702 else
1703 req->cnt = req->length -
1704 (req->ring_index - cnt);
1705 if (req->cnt < (req_cnt + 2))
1706 goto queuing_error;
1707 }
1708
1709 status |= QDSS_GOT_Q_SPACE;
1710
1711 /* Build header part of command packet (excluding the OPCODE). */
1712 req->current_outstanding_cmd = handle;
1713 req->outstanding_cmds[handle] = sp;
1714 sp->handle = handle;
1715 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1716 req->cnt -= req_cnt;
1717
1718 /* Fill-in common area */
1719 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1720 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1721
1722 clr_ptr = (uint32_t *)cmd_pkt + 2;
1723 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1724
1725 /* Set NPORT-ID and LUN number*/
1726 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1727 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1728 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1729 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1730
1731 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1732 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1733
1734 /* Total Data and protection segment(s) */
1735 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1736
1737 /* Build IOCB segments and adjust for data protection segments */
1738 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1739 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1740 QLA_SUCCESS)
1741 goto queuing_error;
1742
1743 cmd_pkt->entry_count = (uint8_t)req_cnt;
1744 /* Specify response queue number where completion should happen */
1745 cmd_pkt->entry_status = (uint8_t) rsp->id;
1746 cmd_pkt->timeout = __constant_cpu_to_le16(0);
1747 wmb();
1748
1749 /* Adjust ring index. */
1750 req->ring_index++;
1751 if (req->ring_index == req->length) {
1752 req->ring_index = 0;
1753 req->ring_ptr = req->ring;
1754 } else
1755 req->ring_ptr++;
1756
1757 /* Set chip new ring index. */
1758 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1759 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1760
1761 /* Manage unprocessed RIO/ZIO commands in response queue. */
1762 if (vha->flags.process_response_queue &&
1763 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1764 qla24xx_process_response_queue(vha, rsp);
1765
1766 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1767
1768 return QLA_SUCCESS;
1769
1770 queuing_error:
1771 if (status & QDSS_GOT_Q_SPACE) {
1772 req->outstanding_cmds[handle] = NULL;
1773 req->cnt += req_cnt;
1774 }
1775 /* Cleanup will be performed by the caller (queuecommand) */
1776
1777 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1778 return QLA_FUNCTION_FAILED;
1779 }
1780
1781
1782 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1783 {
1784 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1785 struct qla_hw_data *ha = sp->fcport->vha->hw;
1786 int affinity = cmd->request->cpu;
1787
1788 if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1789 affinity < ha->max_rsp_queues - 1)
1790 *rsp = ha->rsp_q_map[affinity + 1];
1791 else
1792 *rsp = ha->rsp_q_map[0];
1793 }
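/*
 * Illustrative example (editorial, hypothetical values): with CPU affinity
 * enabled on an HBA reporting max_rsp_queues = 5, a command issued from
 * CPU 2 is completed on rsp_q_map[3]; an out-of-range CPU (or affinity
 * disabled) falls back to the default response queue rsp_q_map[0].
 */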
1794
1795 /* Generic Control-SRB manipulation functions. */
1796 void *
1797 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1798 {
1799 struct qla_hw_data *ha = vha->hw;
1800 struct req_que *req = ha->req_q_map[0];
1801 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1802 uint32_t index, handle;
1803 request_t *pkt;
1804 uint16_t cnt, req_cnt;
1805
1806 pkt = NULL;
1807 req_cnt = 1;
1808 handle = 0;
1809
1810 if (!sp)
1811 goto skip_cmd_array;
1812
1813 /* Check for room in outstanding command list. */
1814 handle = req->current_outstanding_cmd;
1815 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1816 handle++;
1817 if (handle == req->num_outstanding_cmds)
1818 handle = 1;
1819 if (!req->outstanding_cmds[handle])
1820 break;
1821 }
1822 if (index == req->num_outstanding_cmds) {
1823 ql_log(ql_log_warn, vha, 0x700b,
1824 "No room on outstanding cmd array.\n");
1825 goto queuing_error;
1826 }
1827
1828 /* Prep command array. */
1829 req->current_outstanding_cmd = handle;
1830 req->outstanding_cmds[handle] = sp;
1831 sp->handle = handle;
1832
1833 /* Adjust entry-counts as needed. */
1834 if (sp->type != SRB_SCSI_CMD)
1835 req_cnt = sp->iocbs;
1836
1837 skip_cmd_array:
1838 /* Check for room on request queue. */
1839 if (req->cnt < req_cnt) {
1840 if (ha->mqenable || IS_QLA83XX(ha))
1841 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1842 else if (IS_QLA82XX(ha))
1843 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1844 else if (IS_FWI2_CAPABLE(ha))
1845 			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1846 else if (IS_QLAFX00(ha))
1847 			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1848 else
1849 cnt = qla2x00_debounce_register(
1850 			    ISP_REQ_Q_OUT(ha, &reg->isp));
1851
1852 if (req->ring_index < cnt)
1853 req->cnt = cnt - req->ring_index;
1854 else
1855 req->cnt = req->length -
1856 (req->ring_index - cnt);
1857 }
1858 if (req->cnt < req_cnt)
1859 goto queuing_error;
1860
1861 /* Prep packet */
1862 req->cnt -= req_cnt;
1863 pkt = req->ring_ptr;
1864 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1865 if (IS_QLAFX00(ha)) {
1866 WRT_REG_BYTE(&pkt->entry_count, req_cnt);
1867 WRT_REG_WORD(&pkt->handle, handle);
1868 } else {
1869 pkt->entry_count = req_cnt;
1870 pkt->handle = handle;
1871 }
1872
1873 queuing_error:
1874 return pkt;
1875 }
1876
1877 static void
1878 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1879 {
1880 struct srb_iocb *lio = &sp->u.iocb_cmd;
1881
1882 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1883 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1884 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1885 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1886 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1887 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1888 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1889 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1890 logio->port_id[1] = sp->fcport->d_id.b.area;
1891 logio->port_id[2] = sp->fcport->d_id.b.domain;
1892 logio->vp_index = sp->fcport->vha->vp_idx;
1893 }
1894
1895 static void
1896 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1897 {
1898 struct qla_hw_data *ha = sp->fcport->vha->hw;
1899 struct srb_iocb *lio = &sp->u.iocb_cmd;
1900 uint16_t opts;
1901
1902 mbx->entry_type = MBX_IOCB_TYPE;
1903 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1904 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1905 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1906 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1907 if (HAS_EXTENDED_IDS(ha)) {
1908 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1909 mbx->mb10 = cpu_to_le16(opts);
1910 } else {
1911 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1912 }
1913 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1914 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1915 sp->fcport->d_id.b.al_pa);
1916 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1917 }
1918
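/**
 * qla24xx_logout_iocb() - Prepare an implicit-LOGO Login/Logout Port IOCB.
 * @sp: SRB carrying the logout request
 * @logio: Login/Logout Port IOCB to populate
 */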
1919 static void
1920 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1921 {
1922 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1923 logio->control_flags =
1924 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1925 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1926 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1927 logio->port_id[1] = sp->fcport->d_id.b.area;
1928 logio->port_id[2] = sp->fcport->d_id.b.domain;
1929 logio->vp_index = sp->fcport->vha->vp_idx;
1930 }
1931
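/**
 * qla2x00_logout_iocb() - Prepare a fabric-logout mailbox IOCB (ISP2xxx).
 * @sp: SRB carrying the logout request
 * @mbx: mailbox IOCB to populate with the MBC_LOGOUT_FABRIC_PORT command
 */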
1932 static void
1933 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1934 {
1935 struct qla_hw_data *ha = sp->fcport->vha->hw;
1936
1937 mbx->entry_type = MBX_IOCB_TYPE;
1938 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1939 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1940 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1941 cpu_to_le16(sp->fcport->loop_id):
1942 cpu_to_le16(sp->fcport->loop_id << 8);
1943 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1944 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1945 sp->fcport->d_id.b.al_pa);
1946 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1947 /* Implicit: mbx->mb10 = 0. */
1948 }
1949
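/**
 * qla24xx_adisc_iocb() - Prepare an ADISC Login/Logout Port IOCB (ISP24xx).
 * @sp: SRB carrying the ADISC request
 * @logio: Login/Logout Port IOCB to populate
 */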
1950 static void
1951 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1952 {
1953 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1954 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1955 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1956 logio->vp_index = sp->fcport->vha->vp_idx;
1957 }
1958
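/**
 * qla2x00_adisc_iocb() - Prepare a Get Port Database mailbox IOCB (ISP2xxx).
 * @sp: SRB carrying the ADISC request
 * @mbx: mailbox IOCB to populate; the port database is DMAed to the
 *       adapter's async port-database buffer
 */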
1959 static void
1960 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1961 {
1962 struct qla_hw_data *ha = sp->fcport->vha->hw;
1963
1964 mbx->entry_type = MBX_IOCB_TYPE;
1965 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1966 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1967 if (HAS_EXTENDED_IDS(ha)) {
1968 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1969 mbx->mb10 = cpu_to_le16(BIT_0);
1970 } else {
1971 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1972 }
1973 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1974 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1975 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1976 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1977 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1978 }
1979
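/**
 * qla24xx_tm_iocb() - Prepare a Task Management IOCB.
 * @sp: SRB carrying the task-management request
 * @tsk: Task Management IOCB to populate; for a LUN reset the LUN is
 *       copied in FCP byte order
 */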
1980 static void
1981 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1982 {
1983 uint32_t flags;
1984 unsigned int lun;
1985 struct fc_port *fcport = sp->fcport;
1986 scsi_qla_host_t *vha = fcport->vha;
1987 struct qla_hw_data *ha = vha->hw;
1988 struct srb_iocb *iocb = &sp->u.iocb_cmd;
1989 struct req_que *req = vha->req;
1990
1991 flags = iocb->u.tmf.flags;
1992 lun = iocb->u.tmf.lun;
1993
1994 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1995 tsk->entry_count = 1;
1996 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1997 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
1998 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1999 tsk->control_flags = cpu_to_le32(flags);
2000 tsk->port_id[0] = fcport->d_id.b.al_pa;
2001 tsk->port_id[1] = fcport->d_id.b.area;
2002 tsk->port_id[2] = fcport->d_id.b.domain;
2003 tsk->vp_index = fcport->vha->vp_idx;
2004
2005 if (flags == TCF_LUN_RESET) {
2006 int_to_scsilun(lun, &tsk->lun);
2007 host_to_fcp_swap((uint8_t *)&tsk->lun,
2008 sizeof(tsk->lun));
2009 }
2010 }
2011
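/**
 * qla24xx_els_iocb() - Prepare an ELS Pass-Through IOCB.
 * @sp: SRB carrying the ELS request (BSG job)
 * @els_iocb: ELS IOCB to populate with the request and reply payload
 *            data segments
 */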
2012 static void
2013 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2014 {
2015 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2016
2017 els_iocb->entry_type = ELS_IOCB_TYPE;
2018 els_iocb->entry_count = 1;
2019 els_iocb->sys_define = 0;
2020 els_iocb->entry_status = 0;
2021 els_iocb->handle = sp->handle;
2022 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2023 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2024 els_iocb->vp_index = sp->fcport->vha->vp_idx;
2025 els_iocb->sof_type = EST_SOFI3;
2026 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2027
2028 els_iocb->opcode =
2029 sp->type == SRB_ELS_CMD_RPT ?
2030 bsg_job->request->rqst_data.r_els.els_code :
2031 bsg_job->request->rqst_data.h_els.command_code;
2032 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2033 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2034 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2035 els_iocb->control_flags = 0;
2036 els_iocb->rx_byte_count =
2037 cpu_to_le32(bsg_job->reply_payload.payload_len);
2038 els_iocb->tx_byte_count =
2039 cpu_to_le32(bsg_job->request_payload.payload_len);
2040
2041 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2042 (bsg_job->request_payload.sg_list)));
2043 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2044 (bsg_job->request_payload.sg_list)));
2045 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2046 (bsg_job->request_payload.sg_list));
2047
2048 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2049 (bsg_job->reply_payload.sg_list)));
2050 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2051 (bsg_job->reply_payload.sg_list)));
2052 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2053 (bsg_job->reply_payload.sg_list));
2054 }
2055
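/**
 * qla2x00_ct_iocb() - Prepare a CT Pass-Through (MS) IOCB (ISP2xxx).
 * @sp: SRB carrying the CT request (BSG job)
 * @ct_iocb: MS IOCB to populate; reply-payload data segments that do not
 *           fit are placed in Continuation Type 1 IOCBs
 */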
2056 static void
2057 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2058 {
2059 uint16_t avail_dsds;
2060 uint32_t *cur_dsd;
2061 struct scatterlist *sg;
2062 int index;
2063 uint16_t tot_dsds;
2064 scsi_qla_host_t *vha = sp->fcport->vha;
2065 struct qla_hw_data *ha = vha->hw;
2066 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2067 int loop_iteration = 0;
2068 int cont_iocb_prsnt = 0;
2069 int entry_count = 1;
2070
2071 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2072 ct_iocb->entry_type = CT_IOCB_TYPE;
2073 ct_iocb->entry_status = 0;
2074 ct_iocb->handle1 = sp->handle;
2075 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2076 ct_iocb->status = __constant_cpu_to_le16(0);
2077 ct_iocb->control_flags = __constant_cpu_to_le16(0);
2078 ct_iocb->timeout = 0;
2079 ct_iocb->cmd_dsd_count =
2080 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2081 ct_iocb->total_dsd_count =
2082 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2083 ct_iocb->req_bytecount =
2084 cpu_to_le32(bsg_job->request_payload.payload_len);
2085 ct_iocb->rsp_bytecount =
2086 cpu_to_le32(bsg_job->reply_payload.payload_len);
2087
2088 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2089 (bsg_job->request_payload.sg_list)));
2090 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2091 (bsg_job->request_payload.sg_list)));
2092 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2093
2094 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2095 (bsg_job->reply_payload.sg_list)));
2096 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2097 (bsg_job->reply_payload.sg_list)));
2098 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2099
2100 avail_dsds = 1;
2101 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2102 index = 0;
2103 tot_dsds = bsg_job->reply_payload.sg_cnt;
2104
2105 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2106 dma_addr_t sle_dma;
2107 cont_a64_entry_t *cont_pkt;
2108
2109 /* Allocate additional continuation packets? */
2110 if (avail_dsds == 0) {
2111 /*
2112 * Five DSDs are available in the Cont.
2113 * Type 1 IOCB.
2114 */
2115 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2116 vha->hw->req_q_map[0]);
2117 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2118 avail_dsds = 5;
2119 cont_iocb_prsnt = 1;
2120 entry_count++;
2121 }
2122
2123 sle_dma = sg_dma_address(sg);
2124 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2125 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2126 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2127 loop_iteration++;
2128 avail_dsds--;
2129 }
2130 ct_iocb->entry_count = entry_count;
2131 }
2132
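/**
 * qla24xx_ct_iocb() - Prepare a CT Pass-Through IOCB (ISP24xx).
 * @sp: SRB carrying the CT request (BSG job)
 * @ct_iocb: CT IOCB to populate; reply-payload data segments that do not
 *           fit are placed in Continuation Type 1 IOCBs
 */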
2133 static void
2134 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2135 {
2136 uint16_t avail_dsds;
2137 uint32_t *cur_dsd;
2138 struct scatterlist *sg;
2139 int index;
2140 uint16_t tot_dsds;
2141 scsi_qla_host_t *vha = sp->fcport->vha;
2142 struct qla_hw_data *ha = vha->hw;
2143 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2144 int loop_iteration = 0;
2145 int cont_iocb_prsnt = 0;
2146 int entry_count = 1;
2147
2148 ct_iocb->entry_type = CT_IOCB_TYPE;
2149 ct_iocb->entry_status = 0;
2150 ct_iocb->sys_define = 0;
2151 ct_iocb->handle = sp->handle;
2152
2153 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2154 ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2155 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2156
2157 ct_iocb->cmd_dsd_count =
2158 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2159 ct_iocb->timeout = 0;
2160 ct_iocb->rsp_dsd_count =
2161 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2162 ct_iocb->rsp_byte_count =
2163 cpu_to_le32(bsg_job->reply_payload.payload_len);
2164 ct_iocb->cmd_byte_count =
2165 cpu_to_le32(bsg_job->request_payload.payload_len);
2166 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2167 (bsg_job->request_payload.sg_list)));
2168 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2169 (bsg_job->request_payload.sg_list)));
2170 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2171 (bsg_job->request_payload.sg_list));
2172
2173 avail_dsds = 1;
2174 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2175 index = 0;
2176 tot_dsds = bsg_job->reply_payload.sg_cnt;
2177
2178 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2179 dma_addr_t sle_dma;
2180 cont_a64_entry_t *cont_pkt;
2181
2182 /* Allocate additional continuation packets? */
2183 if (avail_dsds == 0) {
2184 /*
2185 * Five DSDs are available in the Cont.
2186 * Type 1 IOCB.
2187 */
2188 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2189 ha->req_q_map[0]);
2190 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2191 avail_dsds = 5;
2192 cont_iocb_prsnt = 1;
2193 entry_count++;
2194 }
2195
2196 sle_dma = sg_dma_address(sg);
2197 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2198 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2199 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2200 loop_iteration++;
2201 avail_dsds--;
2202 }
2203 ct_iocb->entry_count = entry_count;
2204 }
2205
2206 /**
2207 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2208 * @sp: command to send to the ISP
2209 *
2210 * Returns non-zero if a failure occurred, else zero.
2211 */
2212 int
2213 qla82xx_start_scsi(srb_t *sp)
2214 {
2215 int ret, nseg;
2216 unsigned long flags;
2217 struct scsi_cmnd *cmd;
2218 uint32_t *clr_ptr;
2219 uint32_t index;
2220 uint32_t handle;
2221 uint16_t cnt;
2222 uint16_t req_cnt;
2223 uint16_t tot_dsds;
2224 struct device_reg_82xx __iomem *reg;
2225 uint32_t dbval;
2226 uint32_t *fcp_dl;
2227 uint8_t additional_cdb_len;
2228 struct ct6_dsd *ctx;
2229 struct scsi_qla_host *vha = sp->fcport->vha;
2230 struct qla_hw_data *ha = vha->hw;
2231 struct req_que *req = NULL;
2232 struct rsp_que *rsp = NULL;
2233 char tag[2];
2234
2235 /* Setup device pointers. */
2236 ret = 0;
2237 reg = &ha->iobase->isp82;
2238 cmd = GET_CMD_SP(sp);
2239 req = vha->req;
2240 rsp = ha->rsp_q_map[0];
2241
2242 /* So we know we haven't pci_map'ed anything yet */
2243 tot_dsds = 0;
2244
2245 dbval = 0x04 | (ha->portnum << 5);
2246
2247 /* Send marker if required */
2248 if (vha->marker_needed != 0) {
2249 if (qla2x00_marker(vha, req,
2250 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2251 ql_log(ql_log_warn, vha, 0x300c,
2252 "qla2x00_marker failed for cmd=%p.\n", cmd);
2253 return QLA_FUNCTION_FAILED;
2254 }
2255 vha->marker_needed = 0;
2256 }
2257
2258 /* Acquire ring specific lock */
2259 spin_lock_irqsave(&ha->hardware_lock, flags);
2260
2261 /* Check for room in outstanding command list. */
2262 handle = req->current_outstanding_cmd;
2263 for (index = 1; index < req->num_outstanding_cmds; index++) {
2264 handle++;
2265 if (handle == req->num_outstanding_cmds)
2266 handle = 1;
2267 if (!req->outstanding_cmds[handle])
2268 break;
2269 }
2270 if (index == req->num_outstanding_cmds)
2271 goto queuing_error;
2272
2273 /* Map the sg table so we have an accurate count of sg entries needed */
2274 if (scsi_sg_count(cmd)) {
2275 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2276 scsi_sg_count(cmd), cmd->sc_data_direction);
2277 if (unlikely(!nseg))
2278 goto queuing_error;
2279 } else
2280 nseg = 0;
2281
2282 tot_dsds = nseg;
2283
2284 if (tot_dsds > ql2xshiftctondsd) {
2285 struct cmd_type_6 *cmd_pkt;
2286 uint16_t more_dsd_lists = 0;
2287 struct dsd_dma *dsd_ptr;
2288 uint16_t i;
2289
2290 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2291 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2292 ql_dbg(ql_dbg_io, vha, 0x300d,
2293 "Num of DSD list %d is than %d for cmd=%p.\n",
2294 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2295 cmd);
2296 goto queuing_error;
2297 }
2298
2299 if (more_dsd_lists <= ha->gbl_dsd_avail)
2300 goto sufficient_dsds;
2301 else
2302 more_dsd_lists -= ha->gbl_dsd_avail;
2303
2304 for (i = 0; i < more_dsd_lists; i++) {
2305 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2306 if (!dsd_ptr) {
2307 ql_log(ql_log_fatal, vha, 0x300e,
2308 "Failed to allocate memory for dsd_dma "
2309 "for cmd=%p.\n", cmd);
2310 goto queuing_error;
2311 }
2312
2313 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2314 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2315 if (!dsd_ptr->dsd_addr) {
2316 kfree(dsd_ptr);
2317 ql_log(ql_log_fatal, vha, 0x300f,
2318 "Failed to allocate memory for dsd_addr "
2319 "for cmd=%p.\n", cmd);
2320 goto queuing_error;
2321 }
2322 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2323 ha->gbl_dsd_avail++;
2324 }
2325
2326 sufficient_dsds:
2327 req_cnt = 1;
2328
2329 if (req->cnt < (req_cnt + 2)) {
2330 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2331 &reg->req_q_out[0]);
2332 if (req->ring_index < cnt)
2333 req->cnt = cnt - req->ring_index;
2334 else
2335 req->cnt = req->length -
2336 (req->ring_index - cnt);
2337 if (req->cnt < (req_cnt + 2))
2338 goto queuing_error;
2339 }
2340
2341 ctx = sp->u.scmd.ctx =
2342 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2343 if (!ctx) {
2344 ql_log(ql_log_fatal, vha, 0x3010,
2345 "Failed to allocate ctx for cmd=%p.\n", cmd);
2346 goto queuing_error;
2347 }
2348
2349 memset(ctx, 0, sizeof(struct ct6_dsd));
2350 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2351 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2352 if (!ctx->fcp_cmnd) {
2353 ql_log(ql_log_fatal, vha, 0x3011,
2354 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2355 goto queuing_error;
2356 }
2357
2358 /* Initialize the DSD list and dma handle */
2359 INIT_LIST_HEAD(&ctx->dsd_list);
2360 ctx->dsd_use_cnt = 0;
2361
2362 if (cmd->cmd_len > 16) {
2363 additional_cdb_len = cmd->cmd_len - 16;
2364 if ((cmd->cmd_len % 4) != 0) {
2365 /* SCSI commands bigger than 16 bytes must be
2366 * a multiple of 4 bytes in length.
2367 */
2368 ql_log(ql_log_warn, vha, 0x3012,
2369 "scsi cmd len %d not multiple of 4 "
2370 "for cmd=%p.\n", cmd->cmd_len, cmd);
2371 goto queuing_error_fcp_cmnd;
2372 }
2373 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2374 } else {
2375 additional_cdb_len = 0;
2376 ctx->fcp_cmnd_len = 12 + 16 + 4;
2377 }
2378
2379 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2380 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2381
2382 /* Zero out remaining portion of packet. */
2383 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2384 clr_ptr = (uint32_t *)cmd_pkt + 2;
2385 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2386 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2387
2388 /* Set NPORT-ID and LUN number*/
2389 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2390 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2391 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2392 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2393 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2394
2395 /* Build IOCB segments */
2396 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2397 goto queuing_error_fcp_cmnd;
2398
2399 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2400 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2401
2402 /* build FCP_CMND IU */
2403 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2404 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2405 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2406
2407 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2408 ctx->fcp_cmnd->additional_cdb_len |= 1;
2409 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2410 ctx->fcp_cmnd->additional_cdb_len |= 2;
2411
2412 /*
2413 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2414 */
2415 if (scsi_populate_tag_msg(cmd, tag)) {
2416 switch (tag[0]) {
2417 case HEAD_OF_QUEUE_TAG:
2418 ctx->fcp_cmnd->task_attribute =
2419 TSK_HEAD_OF_QUEUE;
2420 break;
2421 case ORDERED_QUEUE_TAG:
2422 ctx->fcp_cmnd->task_attribute =
2423 TSK_ORDERED;
2424 break;
2425 }
2426 }
2427
2428 /* Populate the FCP_PRIO. */
2429 if (ha->flags.fcp_prio_enabled)
2430 ctx->fcp_cmnd->task_attribute |=
2431 sp->fcport->fcp_prio << 3;
2432
2433 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2434
2435 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2436 additional_cdb_len);
2437 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2438
2439 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2440 cmd_pkt->fcp_cmnd_dseg_address[0] =
2441 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2442 cmd_pkt->fcp_cmnd_dseg_address[1] =
2443 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2444
2445 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2446 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2447 /* Set total data segment count. */
2448 cmd_pkt->entry_count = (uint8_t)req_cnt;
2449 /* Specify response queue number where
2450 * completion should happen
2451 */
2452 cmd_pkt->entry_status = (uint8_t) rsp->id;
2453 } else {
2454 struct cmd_type_7 *cmd_pkt;
2455 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2456 if (req->cnt < (req_cnt + 2)) {
2457 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2458 &reg->req_q_out[0]);
2459 if (req->ring_index < cnt)
2460 req->cnt = cnt - req->ring_index;
2461 else
2462 req->cnt = req->length -
2463 (req->ring_index - cnt);
2464 }
2465 if (req->cnt < (req_cnt + 2))
2466 goto queuing_error;
2467
2468 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2469 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2470
2471 /* Zero out remaining portion of packet. */
2472 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2473 clr_ptr = (uint32_t *)cmd_pkt + 2;
2474 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2475 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2476
2477 /* Set NPORT-ID and LUN number*/
2478 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2479 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2480 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2481 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2482 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2483
2484 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2485 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2486 sizeof(cmd_pkt->lun));
2487
2488 /*
2489 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2490 */
2491 if (scsi_populate_tag_msg(cmd, tag)) {
2492 switch (tag[0]) {
2493 case HEAD_OF_QUEUE_TAG:
2494 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2495 break;
2496 case ORDERED_QUEUE_TAG:
2497 cmd_pkt->task = TSK_ORDERED;
2498 break;
2499 }
2500 }
2501
2502 /* Populate the FCP_PRIO. */
2503 if (ha->flags.fcp_prio_enabled)
2504 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2505
2506 /* Load SCSI command packet. */
2507 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2508 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2509
2510 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2511
2512 /* Build IOCB segments */
2513 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2514
2515 /* Set total data segment count. */
2516 cmd_pkt->entry_count = (uint8_t)req_cnt;
2517 /* Specify response queue number where
2518 * completion should happen.
2519 */
2520 cmd_pkt->entry_status = (uint8_t) rsp->id;
2521
2522 }
2523 /* Build command packet. */
2524 req->current_outstanding_cmd = handle;
2525 req->outstanding_cmds[handle] = sp;
2526 sp->handle = handle;
2527 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2528 req->cnt -= req_cnt;
2529 wmb();
2530
2531 /* Adjust ring index. */
2532 req->ring_index++;
2533 if (req->ring_index == req->length) {
2534 req->ring_index = 0;
2535 req->ring_ptr = req->ring;
2536 } else
2537 req->ring_ptr++;
2538
2539 sp->flags |= SRB_DMA_VALID;
2540
2541 /* Set chip new ring index. */
2542 /* write, read and verify logic */
2543 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2544 if (ql2xdbwr)
2545 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2546 else {
2547 WRT_REG_DWORD(
2548 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2549 dbval);
2550 wmb();
2551 while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2552 WRT_REG_DWORD(
2553 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2554 dbval);
2555 wmb();
2556 }
2557 }
2558
2559 /* Manage unprocessed RIO/ZIO commands in response queue. */
2560 if (vha->flags.process_response_queue &&
2561 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2562 qla24xx_process_response_queue(vha, rsp);
2563
2564 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2565 return QLA_SUCCESS;
2566
2567 queuing_error_fcp_cmnd:
2568 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2569 queuing_error:
2570 if (tot_dsds)
2571 scsi_dma_unmap(cmd);
2572
2573 if (sp->u.scmd.ctx) {
2574 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2575 sp->u.scmd.ctx = NULL;
2576 }
2577 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2578
2579 return QLA_FUNCTION_FAILED;
2580 }
2581
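/**
 * qla2x00_start_sp() - Build and issue the IOCB(s) for a non-SCSI SRB.
 * @sp: command to send to the ISP
 *
 * Allocates IOCB space on the default request queue, builds the
 * type-specific IOCB and notifies the ISP.
 *
 * Returns QLA_SUCCESS on success, QLA_FUNCTION_FAILED otherwise.
 */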
2582 int
2583 qla2x00_start_sp(srb_t *sp)
2584 {
2585 int rval;
2586 struct qla_hw_data *ha = sp->fcport->vha->hw;
2587 void *pkt;
2588 unsigned long flags;
2589
2590 rval = QLA_FUNCTION_FAILED;
2591 spin_lock_irqsave(&ha->hardware_lock, flags);
2592 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2593 if (!pkt) {
2594 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2595 "qla2x00_alloc_iocbs failed.\n");
2596 goto done;
2597 }
2598
2599 rval = QLA_SUCCESS;
2600 switch (sp->type) {
2601 case SRB_LOGIN_CMD:
2602 IS_FWI2_CAPABLE(ha) ?
2603 qla24xx_login_iocb(sp, pkt) :
2604 qla2x00_login_iocb(sp, pkt);
2605 break;
2606 case SRB_LOGOUT_CMD:
2607 IS_FWI2_CAPABLE(ha) ?
2608 qla24xx_logout_iocb(sp, pkt) :
2609 qla2x00_logout_iocb(sp, pkt);
2610 break;
2611 case SRB_ELS_CMD_RPT:
2612 case SRB_ELS_CMD_HST:
2613 qla24xx_els_iocb(sp, pkt);
2614 break;
2615 case SRB_CT_CMD:
2616 IS_FWI2_CAPABLE(ha) ?
2617 qla24xx_ct_iocb(sp, pkt) :
2618 qla2x00_ct_iocb(sp, pkt);
2619 break;
2620 case SRB_ADISC_CMD:
2621 IS_FWI2_CAPABLE(ha) ?
2622 qla24xx_adisc_iocb(sp, pkt) :
2623 qla2x00_adisc_iocb(sp, pkt);
2624 break;
2625 case SRB_TM_CMD:
2626 IS_QLAFX00(ha) ?
2627 qlafx00_tm_iocb(sp, pkt) :
2628 qla24xx_tm_iocb(sp, pkt);
2629 break;
2630 case SRB_FXIOCB_DCMD:
2631 case SRB_FXIOCB_BCMD:
2632 qlafx00_fxdisc_iocb(sp, pkt);
2633 break;
2634 case SRB_ABT_CMD:
2635 qlafx00_abort_iocb(sp, pkt);
2636 break;
2637 default:
2638 break;
2639 }
2640
2641 wmb();
2642 qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2643 done:
2644 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2645 return rval;
2646 }
2647
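/**
 * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB.
 * @sp: SRB carrying the bidirectional request (BSG job)
 * @vha: HA context
 * @cmd_pkt: bidirectional command IOCB to populate
 * @tot_dsds: total number of data segment descriptors
 *
 * Write and read data segments are placed in the IOCB and in Continuation
 * Type 1 IOCBs as needed; BD_WRAP_BACK makes the firmware loop the data back.
 */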
2648 static void
2649 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2650 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2651 {
2652 uint16_t avail_dsds;
2653 uint32_t *cur_dsd;
2654 uint32_t req_data_len = 0;
2655 uint32_t rsp_data_len = 0;
2656 struct scatterlist *sg;
2657 int index;
2658 int entry_count = 1;
2659 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2660
2661 /* Update entry type to indicate bidir command. */
2662 *((uint32_t *)(&cmd_pkt->entry_type)) =
2663 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2664
2665 /* Set the transfer direction; in this case set both flags.
2666 * Also set the BD_WRAP_BACK flag; the firmware takes care of
2667 * assigning DID=SID for outgoing packets.
2668 */
2669 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2670 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2671 cmd_pkt->control_flags =
2672 __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2673 BD_WRAP_BACK);
2674
2675 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2676 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2677 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2678 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2679
2680 vha->bidi_stats.transfer_bytes += req_data_len;
2681 vha->bidi_stats.io_count++;
2682
2683 /* Only one DSD is available in the bidirectional IOCB; the remaining
2684 * DSDs are bundled in continuation IOCBs.
2685 */
2686 avail_dsds = 1;
2687 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2688
2689 index = 0;
2690
2691 for_each_sg(bsg_job->request_payload.sg_list, sg,
2692 bsg_job->request_payload.sg_cnt, index) {
2693 dma_addr_t sle_dma;
2694 cont_a64_entry_t *cont_pkt;
2695
2696 /* Allocate additional continuation packets */
2697 if (avail_dsds == 0) {
2698 /* A Continuation Type 1 IOCB can accommodate
2699 * 5 DSDs.
2700 */
2701 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2702 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2703 avail_dsds = 5;
2704 entry_count++;
2705 }
2706 sle_dma = sg_dma_address(sg);
2707 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2708 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2709 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2710 avail_dsds--;
2711 }
2712 /* For a read request the DSDs always go to a continuation IOCB
2713 * and follow the write DSDs. If there is room on the current IOCB
2714 * they are added to that IOCB; otherwise a new continuation IOCB
2715 * is allocated.
2716 */
2717 for_each_sg(bsg_job->reply_payload.sg_list, sg,
2718 bsg_job->reply_payload.sg_cnt, index) {
2719 dma_addr_t sle_dma;
2720 cont_a64_entry_t *cont_pkt;
2721
2722 /* Allocate additional continuation packets */
2723 if (avail_dsds == 0) {
2724 /* A Continuation Type 1 IOCB can accommodate
2725 * 5 DSDs.
2726 */
2727 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2728 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2729 avail_dsds = 5;
2730 entry_count++;
2731 }
2732 sle_dma = sg_dma_address(sg);
2733 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2734 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2735 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2736 avail_dsds--;
2737 }
2738 /* This value should be the same as the number of IOCBs required for this cmd. */
2739 cmd_pkt->entry_count = entry_count;
2740 }
2741
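/**
 * qla2x00_start_bidir() - Send a bidirectional command to the ISP.
 * @sp: command to send to the ISP
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns an EXT_STATUS_* code: EXT_STATUS_BUSY if no handle or request
 * queue space is available, EXT_STATUS_MAILBOX if the marker IOCB fails.
 */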
2742 int
2743 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2744 {
2745
2746 struct qla_hw_data *ha = vha->hw;
2747 unsigned long flags;
2748 uint32_t handle;
2749 uint32_t index;
2750 uint16_t req_cnt;
2751 uint16_t cnt;
2752 uint32_t *clr_ptr;
2753 struct cmd_bidir *cmd_pkt = NULL;
2754 struct rsp_que *rsp;
2755 struct req_que *req;
2756 int rval = EXT_STATUS_OK;
2757
2758 rval = QLA_SUCCESS;
2759
2760 rsp = ha->rsp_q_map[0];
2761 req = vha->req;
2762
2763 /* Send marker if required */
2764 if (vha->marker_needed != 0) {
2765 if (qla2x00_marker(vha, req,
2766 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2767 return EXT_STATUS_MAILBOX;
2768 vha->marker_needed = 0;
2769 }
2770
2771 /* Acquire ring specific lock */
2772 spin_lock_irqsave(&ha->hardware_lock, flags);
2773
2774 /* Check for room in outstanding command list. */
2775 handle = req->current_outstanding_cmd;
2776 for (index = 1; index < req->num_outstanding_cmds; index++) {
2777 handle++;
2778 if (handle == req->num_outstanding_cmds)
2779 handle = 1;
2780 if (!req->outstanding_cmds[handle])
2781 break;
2782 }
2783
2784 if (index == req->num_outstanding_cmds) {
2785 rval = EXT_STATUS_BUSY;
2786 goto queuing_error;
2787 }
2788
2789 /* Calculate number of IOCB required */
2790 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2791
2792 /* Check for room on request queue. */
2793 if (req->cnt < req_cnt + 2) {
2794 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
2795
2796 if (req->ring_index < cnt)
2797 req->cnt = cnt - req->ring_index;
2798 else
2799 req->cnt = req->length -
2800 (req->ring_index - cnt);
2801 }
2802 if (req->cnt < req_cnt + 2) {
2803 rval = EXT_STATUS_BUSY;
2804 goto queuing_error;
2805 }
2806
2807 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2808 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2809
2810 /* Zero out remaining portion of packet. */
2811 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2812 clr_ptr = (uint32_t *)cmd_pkt + 2;
2813 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2814
2815 /* Set NPORT-ID (of vha)*/
2816 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2817 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2818 cmd_pkt->port_id[1] = vha->d_id.b.area;
2819 cmd_pkt->port_id[2] = vha->d_id.b.domain;
2820
2821 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2822 cmd_pkt->entry_status = (uint8_t) rsp->id;
2823 /* Build command packet. */
2824 req->current_outstanding_cmd = handle;
2825 req->outstanding_cmds[handle] = sp;
2826 sp->handle = handle;
2827 req->cnt -= req_cnt;
2828
2829 /* Send the command to the firmware */
2830 wmb();
2831 qla2x00_start_iocbs(vha, req);
2832 queuing_error:
2833 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2834 return rval;
2835 }
2836