/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */

#include "qla_target.h"
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
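	/*
	 * The first IOCB carries a single DSD; each additional
	 * Continuation Type 1 IOCB carries up to five more.  For
	 * example, dsds = 12 works out to 1 + (11 / 5) + 1 = 4 IOCBs.
	 */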
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      addr = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = rd_reg_word(addr);
		barrier();
		cpu_relax();
		second = rd_reg_word(addr);
	} while (first != second);

	return (first);
}

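/*
 * qla2x00_poll() - Service the response queue by invoking the interrupt
 * handler directly (polled mode).  P3P (ISP82xx) parts use their own
 * polling routine; all other parts go through the isp_ops interrupt
 * handler.
 */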
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;

	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
}

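/*
 * host_to_fcp_swap() - Byte-swap a buffer, 32 bits at a time, in place.
 * @bsize is effectively rounded down to a multiple of four; the swapped
 * buffer is returned for convenience.
 */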
static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

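/*
 * host_to_adap() - Copy a host buffer to an adapter-bound buffer,
 * converting each 32-bit word to little-endian on the way.
 */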
static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for ( ; iter--; isrc++)
		*odest++ = cpu_to_le32(*isrc);
}

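/*
 * qla2x00_clean_dsd_pool() - Return every DSD entry on @ctx->dsd_list to
 * the dl_dma_pool, free the tracking structures and reinitialize the
 * list.
 */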
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
	struct dsd_dma *dsd, *tdsd;

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
		    dsd->dsd_list_dma);
		list_del(&dsd->list);
		kfree(dsd);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

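/*
 * qla2x00_set_fcport_disc_state() - Record a new discovery state for
 * @fcport and push the previous state into the 4-bit-per-entry history
 * kept in shadow_disc_state (updated via a cmpxchg retry loop), logging
 * the transition.
 */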
static inline void
qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
{
	int old_val;
	uint8_t shiftbits, mask;
	uint8_t port_dstate_str_sz;

	/* This will have to change when the max no. of states > 16 */
	shiftbits = 4;
	mask = (1 << shiftbits) - 1;

	port_dstate_str_sz = sizeof(port_dstate_str) / sizeof(char *);
	fcport->disc_state = state;
	while (1) {
		old_val = atomic_read(&fcport->shadow_disc_state);
		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
		    old_val, (old_val << shiftbits) | state)) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
			    fcport->port_name, (old_val & mask) < port_dstate_str_sz ?
				    port_dstate_str[old_val & mask] : "Unknown",
			    port_dstate_str[state], fcport->d_id.b24);
			return;
		}
	}
}

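/*
 * qla2x00_hba_err_chk_enabled() - Decide whether the HBA should perform
 * protection-information error checking for this command, based on the
 * SCSI protection operation and the ql2xenablehba_err_chk module
 * parameter.
 */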
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}

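/*
 * qla2x00_reset_active() - Report whether an ISP abort/reset is pending
 * or in progress on @vha or on its physical (base) host.
 */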
static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

static inline int
qla2x00_chip_is_down(scsi_qla_host_t *vha)
{
	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
}

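/*
 * qla2xxx_init_sp() - Zero a freshly allocated SRB and seed the fields
 * every request needs (fcport, vha, qpair, a single IOCB and TYPE_SRB).
 */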
static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
			    struct qla_qpair *qpair, fc_port_t *fcport)
{
	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
	sp->vha = vha;
	sp->qpair = qpair;
	sp->cmd_type = TYPE_SRB;
	INIT_LIST_HEAD(&sp->elem);
}

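/*
 * qla2xxx_get_qpair_sp() - Allocate and initialize an SRB from the queue
 * pair's mempool.  Returns NULL if the qpair cannot be marked busy or the
 * allocation fails; the busy mark is held while the SRB is outstanding
 * and dropped again on failure.
 */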
static inline srb_t *
qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
    fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	QLA_QPAIR_MARK_BUSY(qpair, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(qpair->srb_mempool, flag);
	if (sp)
		qla2xxx_init_sp(sp, vha, qpair, fcport);
	else
		QLA_QPAIR_MARK_NOT_BUSY(qpair);
	return sp;
}

void qla2xxx_rel_done_warning(srb_t *sp, int res);
void qla2xxx_rel_free_warning(srb_t *sp);

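/*
 * qla2xxx_rel_qpair_sp() - Return an SRB obtained from
 * qla2xxx_get_qpair_sp() to its mempool and clear the qpair busy mark.
 * The done/free callbacks are pointed at warning stubs so that any
 * use-after-release is reported.
 */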
static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	sp->qpair = NULL;
	sp->done = qla2xxx_rel_done_warning;
	sp->free = qla2xxx_rel_free_warning;
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}

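/*
 * qla2x00_get_sp() - Allocate an SRB from the base queue pair while
 * holding a vha busy reference; the reference is dropped if the
 * allocation fails.  Counterpart of qla2x00_rel_sp().
 */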
static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;
	struct qla_qpair *qpair;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (unlikely(bail))
		return NULL;

	qpair = vha->hw->base_qpair;
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
	if (!sp)
		goto done;

	sp->vha = vha;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

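/*
 * qla2x00_handle_mbx_completion() - If a mailbox command is waiting on an
 * interrupt and the status indicates one, flag the completion and wake up
 * the waiter.
 */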
static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}

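/*
 * qla2x00_set_retry_delay_timestamp() - Translate the SAM status
 * qualifier returned by the target into an I/O hold-off deadline
 * (fcport->retry_delay_timestamp).  Only I-T nexus scopes (1 and 2) are
 * honoured; the delay is expressed in 100 ms units and capped at
 * SQ_MAX_WAIT_SEC seconds.  Does nothing while a previous delay is still
 * in effect.
 */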
static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
	u8 scope;
	u16 qual;
#define SQ_SCOPE_MASK		0xc000 /* SAM-6 rev5 5.3.2 */
#define SQ_SCOPE_SHIFT		14
#define SQ_QUAL_MASK		0x3fff

#define SQ_MAX_WAIT_SEC		60 /* Max I/O hold off time in seconds. */
#define SQ_MAX_WAIT_TIME	(SQ_MAX_WAIT_SEC * 10) /* in 100ms. */

	if (!sts_qual) /* Common case. */
		return;

	scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
	/* Handle only scope 1 or 2, which is for I-T nexus. */
	if (scope != 1 && scope != 2)
		return;

	/* Skip processing, if retry delay timer is already in effect. */
	if (fcport->retry_delay_timestamp &&
	    time_before(jiffies, fcport->retry_delay_timestamp))
		return;

	qual = sts_qual & SQ_QUAL_MASK;
	if (qual < 1 || qual > 0x3fef)
		return;
	qual = min(qual, (u16)SQ_MAX_WAIT_TIME);

	/* qual is expressed in 100ms increments. */
	fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);

	ql_log(ql_log_warn, fcport->vha, 0x5101,
	       "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
	       fcport->port_name, sts_qual, qual * 100);
}

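/*
 * qla_is_exch_offld_enabled() - True when the exchange count configured
 * for the active mode (initiator, target or dual) exceeds the firmware
 * default, FW_DEF_EXCHANGES_CNT.
 */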
static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
	if (qla_ini_mode_enabled(vha) &&
	    (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_tgt_mode_enabled(vha) &&
	    (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_dual_mode_enabled(vha) &&
	    ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
		return true;
	else
		return false;
}

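/*
 * qla_cpu_update() - Record the CPU now servicing this queue pair and
 * propagate it to any registered qpair hints.
 */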
static inline void
qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
{
	qpair->cpuid = cpuid;

	if (!list_empty(&qpair->hints_list)) {
		struct qla_qpair_hint *h;

		list_for_each_entry(h, &qpair->hints_list, hint_elem)
			h->cpuid = qpair->cpuid;
	}
}

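/*
 * qla_qpair_to_hint() - Linear search of the target's qphints array for
 * the hint belonging to @qpair; returns NULL if none matches.
 */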
static inline struct qla_qpair_hint *
qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
{
	struct qla_qpair_hint *h;
	u16 i;

	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
		h = &tgt->qphints[i];
		if (h->qpair == qpair)
			return h;
	}

	return NULL;
}

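/*
 * qla_83xx_start_iocbs() - Advance the request-queue ring index (wrapping
 * at the end of the ring) and write the new in-pointer to the ISP.
 */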
static inline void
qla_83xx_start_iocbs(struct qla_qpair *qpair)
{
	struct req_que *req = qpair->req;

	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	wrt_reg_dword(req->req_q_in, req->ring_index);
}

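/*
 * qla2xxx_get_fc4_priority() - Read the dual FCP/NVMe flag byte from
 * NVRAM and return which FC4 type (FCP or NVMe) is given priority.
 */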
static inline int
qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
{
	uint32_t data;

	data =
	    ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];

	return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}

enum {
	RESOURCE_NONE,
	RESOURCE_INI,
};

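/*
 * qla_get_iocbs() - Reserve @iores->iocb_cnt IOCB slots against the
 * firmware IOCB budget when ql2xenforce_iocb_limit is set.  The per-qpair
 * limit is tried first; otherwise usage is summed across all queue pairs
 * and checked against the global limit.  Returns 0 on success or -ENOSPC
 * if the reservation would exceed the limit.
 */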
static inline int
qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
{
	u16 iocbs_used, i;
	struct qla_hw_data *ha = qp->vha->hw;

	if (!ql2xenforce_iocb_limit) {
		iores->res_type = RESOURCE_NONE;
		return 0;
	}

	if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
		qp->fwres.iocbs_used += iores->iocb_cnt;
		return 0;
	} else {
		/* no need to acquire qpair lock. It's just rough calculation */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
			qp->fwres.iocbs_used += iores->iocb_cnt;
			return 0;
		} else {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}
}

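/*
 * qla_put_iocbs() - Release IOCB slots previously reserved by
 * qla_get_iocbs(), clamping the usage counter at zero, and reset the
 * resource type to RESOURCE_NONE.
 */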
static inline void
qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
{
	switch (iores->res_type) {
	case RESOURCE_NONE:
		break;
	default:
		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
			qp->fwres.iocbs_used -= iores->iocb_cnt;
		} else {
			/* should not happen */
			qp->fwres.iocbs_used = 0;
		}
		break;
	}
	iores->res_type = RESOURCE_NONE;
}

#define ISP_REG_DISCONNECT 0xffffffffU
/**************************************************************************
 * qla2x00_isp_reg_stat
 *
 * Description:
 *        Read the host status register of the ISP before aborting the
 *        command.
 *
 * Input:
 *       ha = pointer to host adapter structure.
 *
 * Returns:
 *       Non-zero (true) if the register reads back as ISP_REG_DISCONNECT,
 *       i.e. the device has disconnected; zero otherwise.
 **************************************************************************/
static inline
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;

	if (IS_P3P_TYPE(ha))
		return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
	else
		return ((rd_reg_dword(&reg->host_status)) ==
			ISP_REG_DISCONNECT);
}

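/*
 * qla_pci_disconnected() - Check the host status register for an all-ones
 * read, which indicates the PCI device has dropped off the bus; if so,
 * log it, schedule the EEH recovery work and return true.
 */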
static inline
bool qla_pci_disconnected(struct scsi_qla_host *vha,
			  struct device_reg_24xx __iomem *reg)
{
	uint32_t stat;
	bool ret = false;

	stat = rd_reg_dword(&reg->host_status);
	if (stat == 0xffffffff) {
		ql_log(ql_log_info, vha, 0x8041,
		       "detected PCI disconnect.\n");
		qla_schedule_eeh_work(vha);
		ret = true;
	}
	return ret;
}