1 /*
2  * QLogic iSCSI HBA Driver
3  * Copyright (c)  2003-2012 QLogic Corporation
4  *
5  * See LICENSE.qla4xxx for copyright and licensing details.
6  */
7 #include <linux/moduleparam.h>
8 #include <linux/slab.h>
9 #include <linux/blkdev.h>
10 #include <linux/iscsi_boot_sysfs.h>
11 #include <linux/inet.h>
12 
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsicam.h>
15 
16 #include "ql4_def.h"
17 #include "ql4_version.h"
18 #include "ql4_glbl.h"
19 #include "ql4_dbg.h"
20 #include "ql4_inline.h"
21 #include "ql4_83xx.h"
22 
23 /*
24  * Driver version
25  */
26 static char qla4xxx_version_str[40];
27 
28 /*
29  * SRB allocation cache
30  */
31 static struct kmem_cache *srb_cachep;
32 
33 /*
34  * Module parameter information and variables
35  */
36 static int ql4xdisablesysfsboot = 1;
37 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(ql4xdisablesysfsboot,
39 		 " Set to disable exporting boot targets to sysfs.\n"
40 		 "\t\t  0 - Export boot targets\n"
41 		 "\t\t  1 - Do not export boot targets (Default)");
42 
43 int ql4xdontresethba;
44 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
45 MODULE_PARM_DESC(ql4xdontresethba,
46 		 " Don't reset the HBA for driver recovery.\n"
47 		 "\t\t  0 - It will reset HBA (Default)\n"
48 		 "\t\t  1 - It will NOT reset HBA");
49 
50 int ql4xextended_error_logging;
51 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
52 MODULE_PARM_DESC(ql4xextended_error_logging,
53 		 " Option to enable extended error logging.\n"
54 		 "\t\t  0 - no logging (Default)\n"
55 		 "\t\t  2 - debug logging");
56 
57 int ql4xenablemsix = 1;
58 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
59 MODULE_PARM_DESC(ql4xenablemsix,
60 		 " Set to enable MSI or MSI-X interrupt mechanism.\n"
61 		 "\t\t  0 = enable INTx interrupt mechanism.\n"
62 		 "\t\t  1 = enable MSI-X interrupt mechanism (Default).\n"
63 		 "\t\t  2 = enable MSI interrupt mechanism.");
64 
65 #define QL4_DEF_QDEPTH 32
66 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
67 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
68 MODULE_PARM_DESC(ql4xmaxqdepth,
69 		 " Maximum queue depth to report for target devices.\n"
70 		 "\t\t  Default: 32.");
71 
72 static int ql4xqfulltracking = 1;
73 module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
74 MODULE_PARM_DESC(ql4xqfulltracking,
75 		 " Enable or disable dynamic tracking and adjustment of\n"
76 		 "\t\t scsi device queue depth.\n"
77 		 "\t\t  0 - Disable.\n"
78 		 "\t\t  1 - Enable. (Default)");
79 
80 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
81 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
82 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
83 		" Target Session Recovery Timeout.\n"
84 		"\t\t  Default: 120 sec.");
85 
86 int ql4xmdcapmask = 0x1F;
87 module_param(ql4xmdcapmask, int, S_IRUGO);
88 MODULE_PARM_DESC(ql4xmdcapmask,
89 		 " Set the Minidump driver capture mask level.\n"
90 		 "\t\t  Default is 0x1F.\n"
91 		 "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
92 
93 int ql4xenablemd = 1;
94 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
95 MODULE_PARM_DESC(ql4xenablemd,
96 		 " Set to enable minidump.\n"
97 		 "\t\t  0 - disable minidump\n"
98 		 "\t\t  1 - enable minidump (Default)");
99 
100 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
101 /*
102  * Internal driver helper function prototypes
103  */
104 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
105 
106 /*
107  * iSCSI template entry points
108  */
109 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
110 				     enum iscsi_param param, char *buf);
111 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
112 				  enum iscsi_param param, char *buf);
113 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
114 				  enum iscsi_host_param param, char *buf);
115 static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
116 				   uint32_t len);
117 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
118 				   enum iscsi_param_type param_type,
119 				   int param, char *buf);
120 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
121 static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
122 						 struct sockaddr *dst_addr,
123 						 int non_blocking);
124 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
125 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
126 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
127 				enum iscsi_param param, char *buf);
128 static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
129 static struct iscsi_cls_conn *
130 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
131 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
132 			     struct iscsi_cls_conn *cls_conn,
133 			     uint64_t transport_fd, int is_leading);
134 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
135 static struct iscsi_cls_session *
136 qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
137 			uint16_t qdepth, uint32_t initial_cmdsn);
138 static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
139 static void qla4xxx_task_work(struct work_struct *wdata);
140 static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
141 static int qla4xxx_task_xmit(struct iscsi_task *);
142 static void qla4xxx_task_cleanup(struct iscsi_task *);
143 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
144 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
145 				   struct iscsi_stats *stats);
146 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
147 			     uint32_t iface_type, uint32_t payload_size,
148 			     uint32_t pid, struct sockaddr *dst_addr);
149 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
150 				 uint32_t *num_entries, char *buf);
151 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
152 
153 /*
154  * SCSI host template entry points
155  */
156 static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
157 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd);
158 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd);
159 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd);
160 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
161 static int qla4xxx_slave_alloc(struct scsi_device *device);
162 static int qla4xxx_slave_configure(struct scsi_device *device);
163 static void qla4xxx_slave_destroy(struct scsi_device *sdev);
164 static umode_t qla4_attr_is_visible(int param_type, int param);
165 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
166 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
167 				      int reason);
168 
169 /*
170  * iSCSI Flash DDB sysfs entry points
171  */
172 static int
173 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
174 			    struct iscsi_bus_flash_conn *fnode_conn,
175 			    void *data, int len);
176 static int
177 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
178 			    int param, char *buf);
179 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
180 				 int len);
181 static int
182 qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
183 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
184 				   struct iscsi_bus_flash_conn *fnode_conn);
185 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
186 				    struct iscsi_bus_flash_conn *fnode_conn);
187 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
188 
189 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
190     QLA82XX_LEGACY_INTR_CONFIG;
191 
192 static struct scsi_host_template qla4xxx_driver_template = {
193 	.module			= THIS_MODULE,
194 	.name			= DRIVER_NAME,
195 	.proc_name		= DRIVER_NAME,
196 	.queuecommand		= qla4xxx_queuecommand,
197 
198 	.eh_abort_handler	= qla4xxx_eh_abort,
199 	.eh_device_reset_handler = qla4xxx_eh_device_reset,
200 	.eh_target_reset_handler = qla4xxx_eh_target_reset,
201 	.eh_host_reset_handler	= qla4xxx_eh_host_reset,
202 	.eh_timed_out		= qla4xxx_eh_cmd_timed_out,
203 
204 	.slave_configure	= qla4xxx_slave_configure,
205 	.slave_alloc		= qla4xxx_slave_alloc,
206 	.slave_destroy		= qla4xxx_slave_destroy,
207 	.change_queue_depth	= qla4xxx_change_queue_depth,
208 
209 	.this_id		= -1,
210 	.cmd_per_lun		= 3,
211 	.use_clustering		= ENABLE_CLUSTERING,
212 	.sg_tablesize		= SG_ALL,
213 
214 	.max_sectors		= 0xFFFF,
215 	.shost_attrs		= qla4xxx_host_attrs,
216 	.host_reset		= qla4xxx_host_reset,
217 	.vendor_id		= SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
218 };
219 
220 static struct iscsi_transport qla4xxx_iscsi_transport = {
221 	.owner			= THIS_MODULE,
222 	.name			= DRIVER_NAME,
223 	.caps			= CAP_TEXT_NEGO |
224 				  CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
225 				  CAP_DATADGST | CAP_LOGIN_OFFLOAD |
226 				  CAP_MULTI_R2T,
227 	.attr_is_visible	= qla4_attr_is_visible,
228 	.create_session         = qla4xxx_session_create,
229 	.destroy_session        = qla4xxx_session_destroy,
230 	.start_conn             = qla4xxx_conn_start,
231 	.create_conn            = qla4xxx_conn_create,
232 	.bind_conn              = qla4xxx_conn_bind,
233 	.stop_conn              = iscsi_conn_stop,
234 	.destroy_conn           = qla4xxx_conn_destroy,
235 	.set_param              = iscsi_set_param,
236 	.get_conn_param		= qla4xxx_conn_get_param,
237 	.get_session_param	= qla4xxx_session_get_param,
238 	.get_ep_param           = qla4xxx_get_ep_param,
239 	.ep_connect		= qla4xxx_ep_connect,
240 	.ep_poll		= qla4xxx_ep_poll,
241 	.ep_disconnect		= qla4xxx_ep_disconnect,
242 	.get_stats		= qla4xxx_conn_get_stats,
243 	.send_pdu		= iscsi_conn_send_pdu,
244 	.xmit_task		= qla4xxx_task_xmit,
245 	.cleanup_task		= qla4xxx_task_cleanup,
246 	.alloc_pdu		= qla4xxx_alloc_pdu,
247 
248 	.get_host_param		= qla4xxx_host_get_param,
249 	.set_iface_param	= qla4xxx_iface_set_param,
250 	.get_iface_param	= qla4xxx_get_iface_param,
251 	.bsg_request		= qla4xxx_bsg_request,
252 	.send_ping		= qla4xxx_send_ping,
253 	.get_chap		= qla4xxx_get_chap_list,
254 	.delete_chap		= qla4xxx_delete_chap,
255 	.get_flashnode_param	= qla4xxx_sysfs_ddb_get_param,
256 	.set_flashnode_param	= qla4xxx_sysfs_ddb_set_param,
257 	.new_flashnode		= qla4xxx_sysfs_ddb_add,
258 	.del_flashnode		= qla4xxx_sysfs_ddb_delete,
259 	.login_flashnode	= qla4xxx_sysfs_ddb_login,
260 	.logout_flashnode	= qla4xxx_sysfs_ddb_logout,
261 	.logout_flashnode_sid	= qla4xxx_sysfs_ddb_logout_sid,
262 };
263 
264 static struct scsi_transport_template *qla4xxx_scsi_transport;
265 
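/*
 * qla4xxx_send_ping - issue a ping IOCB to the given destination.
 * For an IPv4 destination the ping goes out over the IPv4 interface;
 * for IPv6 the link-local source address is tried first and, on
 * failure, the per-interface IPv6 address (ADDR0/ADDR1). Returns
 * -EINVAL if the IOCB fails and -ENOSYS for unsupported iface/address
 * family combinations.
 */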
266 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
267 			     uint32_t iface_type, uint32_t payload_size,
268 			     uint32_t pid, struct sockaddr *dst_addr)
269 {
270 	struct scsi_qla_host *ha = to_qla_host(shost);
271 	struct sockaddr_in *addr;
272 	struct sockaddr_in6 *addr6;
273 	uint32_t options = 0;
274 	uint8_t ipaddr[IPv6_ADDR_LEN];
275 	int rval;
276 
277 	memset(ipaddr, 0, IPv6_ADDR_LEN);
278 	/* IPv4 to IPv4 */
279 	if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
280 	    (dst_addr->sa_family == AF_INET)) {
281 		addr = (struct sockaddr_in *)dst_addr;
282 		memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
283 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
284 				  "dest: %pI4\n", __func__,
285 				  &ha->ip_config.ip_address, ipaddr));
286 		rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
287 					 ipaddr);
288 		if (rval)
289 			rval = -EINVAL;
290 	} else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
291 		   (dst_addr->sa_family == AF_INET6)) {
292 		/* IPv6 to IPv6 */
293 		addr6 = (struct sockaddr_in6 *)dst_addr;
294 		memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
295 
296 		options |= PING_IPV6_PROTOCOL_ENABLE;
297 
298 		/* Ping using LinkLocal address */
299 		if ((iface_num == 0) || (iface_num == 1)) {
300 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
301 					  "src: %pI6 dest: %pI6\n", __func__,
302 					  &ha->ip_config.ipv6_link_local_addr,
303 					  ipaddr));
304 			options |= PING_IPV6_LINKLOCAL_ADDR;
305 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
306 						 pid, ipaddr);
307 		} else {
308 			ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
309 				   "not supported\n", __func__, iface_num);
310 			rval = -ENOSYS;
311 			goto exit_send_ping;
312 		}
313 
314 		/*
315 		 * If ping using LinkLocal address fails, try ping using
316 		 * IPv6 address
317 		 */
318 		if (rval != QLA_SUCCESS) {
319 			options &= ~PING_IPV6_LINKLOCAL_ADDR;
320 			if (iface_num == 0) {
321 				options |= PING_IPV6_ADDR0;
322 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
323 						  "Ping src: %pI6 "
324 						  "dest: %pI6\n", __func__,
325 						  &ha->ip_config.ipv6_addr0,
326 						  ipaddr));
327 			} else if (iface_num == 1) {
328 				options |= PING_IPV6_ADDR1;
329 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
330 						  "Ping src: %pI6 "
331 						  "dest: %pI6\n", __func__,
332 						  &ha->ip_config.ipv6_addr1,
333 						  ipaddr));
334 			}
335 			rval = qla4xxx_ping_iocb(ha, options, payload_size,
336 						 pid, ipaddr);
337 			if (rval)
338 				rval = -EINVAL;
339 		}
340 	} else
341 		rval = -ENOSYS;
342 exit_send_ping:
343 	return rval;
344 }
345 
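/*
 * qla4_attr_is_visible - report which iSCSI transport attributes are
 * exported through sysfs. All supported host, session, net and flash
 * node parameters are read-only (S_IRUGO); everything else is hidden.
 */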
346 static umode_t qla4_attr_is_visible(int param_type, int param)
347 {
348 	switch (param_type) {
349 	case ISCSI_HOST_PARAM:
350 		switch (param) {
351 		case ISCSI_HOST_PARAM_HWADDRESS:
352 		case ISCSI_HOST_PARAM_IPADDRESS:
353 		case ISCSI_HOST_PARAM_INITIATOR_NAME:
354 		case ISCSI_HOST_PARAM_PORT_STATE:
355 		case ISCSI_HOST_PARAM_PORT_SPEED:
356 			return S_IRUGO;
357 		default:
358 			return 0;
359 		}
360 	case ISCSI_PARAM:
361 		switch (param) {
362 		case ISCSI_PARAM_PERSISTENT_ADDRESS:
363 		case ISCSI_PARAM_PERSISTENT_PORT:
364 		case ISCSI_PARAM_CONN_ADDRESS:
365 		case ISCSI_PARAM_CONN_PORT:
366 		case ISCSI_PARAM_TARGET_NAME:
367 		case ISCSI_PARAM_TPGT:
368 		case ISCSI_PARAM_TARGET_ALIAS:
369 		case ISCSI_PARAM_MAX_BURST:
370 		case ISCSI_PARAM_MAX_R2T:
371 		case ISCSI_PARAM_FIRST_BURST:
372 		case ISCSI_PARAM_MAX_RECV_DLENGTH:
373 		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
374 		case ISCSI_PARAM_IFACE_NAME:
375 		case ISCSI_PARAM_CHAP_OUT_IDX:
376 		case ISCSI_PARAM_CHAP_IN_IDX:
377 		case ISCSI_PARAM_USERNAME:
378 		case ISCSI_PARAM_PASSWORD:
379 		case ISCSI_PARAM_USERNAME_IN:
380 		case ISCSI_PARAM_PASSWORD_IN:
381 			return S_IRUGO;
382 		default:
383 			return 0;
384 		}
385 	case ISCSI_NET_PARAM:
386 		switch (param) {
387 		case ISCSI_NET_PARAM_IPV4_ADDR:
388 		case ISCSI_NET_PARAM_IPV4_SUBNET:
389 		case ISCSI_NET_PARAM_IPV4_GW:
390 		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
391 		case ISCSI_NET_PARAM_IFACE_ENABLE:
392 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
393 		case ISCSI_NET_PARAM_IPV6_ADDR:
394 		case ISCSI_NET_PARAM_IPV6_ROUTER:
395 		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
396 		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
397 		case ISCSI_NET_PARAM_VLAN_ID:
398 		case ISCSI_NET_PARAM_VLAN_PRIORITY:
399 		case ISCSI_NET_PARAM_VLAN_ENABLED:
400 		case ISCSI_NET_PARAM_MTU:
401 		case ISCSI_NET_PARAM_PORT:
402 			return S_IRUGO;
403 		default:
404 			return 0;
405 		}
406 	case ISCSI_FLASHNODE_PARAM:
407 		switch (param) {
408 		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
409 		case ISCSI_FLASHNODE_PORTAL_TYPE:
410 		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
411 		case ISCSI_FLASHNODE_DISCOVERY_SESS:
412 		case ISCSI_FLASHNODE_ENTRY_EN:
413 		case ISCSI_FLASHNODE_HDR_DGST_EN:
414 		case ISCSI_FLASHNODE_DATA_DGST_EN:
415 		case ISCSI_FLASHNODE_IMM_DATA_EN:
416 		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
417 		case ISCSI_FLASHNODE_DATASEQ_INORDER:
418 		case ISCSI_FLASHNODE_PDU_INORDER:
419 		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
420 		case ISCSI_FLASHNODE_SNACK_REQ_EN:
421 		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
422 		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
423 		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
424 		case ISCSI_FLASHNODE_ERL:
425 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
426 		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
427 		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
428 		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
429 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
430 		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
431 		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
432 		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
433 		case ISCSI_FLASHNODE_FIRST_BURST:
434 		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
435 		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
436 		case ISCSI_FLASHNODE_MAX_R2T:
437 		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
438 		case ISCSI_FLASHNODE_ISID:
439 		case ISCSI_FLASHNODE_TSID:
440 		case ISCSI_FLASHNODE_PORT:
441 		case ISCSI_FLASHNODE_MAX_BURST:
442 		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
443 		case ISCSI_FLASHNODE_IPADDR:
444 		case ISCSI_FLASHNODE_ALIAS:
445 		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
446 		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
447 		case ISCSI_FLASHNODE_LOCAL_PORT:
448 		case ISCSI_FLASHNODE_IPV4_TOS:
449 		case ISCSI_FLASHNODE_IPV6_TC:
450 		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
451 		case ISCSI_FLASHNODE_NAME:
452 		case ISCSI_FLASHNODE_TPGT:
453 		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
454 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
455 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
456 		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
457 		case ISCSI_FLASHNODE_TCP_RECV_WSF:
458 		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
459 		case ISCSI_FLASHNODE_USERNAME:
460 		case ISCSI_FLASHNODE_PASSWORD:
461 		case ISCSI_FLASHNODE_STATSN:
462 		case ISCSI_FLASHNODE_EXP_STATSN:
463 		case ISCSI_FLASHNODE_IS_BOOT_TGT:
464 			return S_IRUGO;
465 		default:
466 			return 0;
467 		}
468 	}
469 
470 	return 0;
471 }
472 
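/*
 * qla4xxx_get_chap_list - copy valid CHAP table entries, starting at
 * chap_tbl_idx, from the cached chap_list into the caller's buffer of
 * struct iscsi_chap_rec. On return *num_entries holds the number of
 * entries copied. The walk is serialized with ha->chap_sem.
 */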
473 static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
474 				  uint32_t *num_entries, char *buf)
475 {
476 	struct scsi_qla_host *ha = to_qla_host(shost);
477 	struct ql4_chap_table *chap_table;
478 	struct iscsi_chap_rec *chap_rec;
479 	int max_chap_entries = 0;
480 	int valid_chap_entries = 0;
481 	int ret = 0, i;
482 
483 	if (is_qla80XX(ha))
484 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
485 					sizeof(struct ql4_chap_table);
486 	else
487 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
488 
489 	ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
490 			__func__, *num_entries, chap_tbl_idx);
491 
492 	if (!buf) {
493 		ret = -ENOMEM;
494 		goto exit_get_chap_list;
495 	}
496 
497 	chap_rec = (struct iscsi_chap_rec *) buf;
498 	mutex_lock(&ha->chap_sem);
499 	for (i = chap_tbl_idx; i < max_chap_entries; i++) {
500 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
501 		if (chap_table->cookie !=
502 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE))
503 			continue;
504 
505 		chap_rec->chap_tbl_idx = i;
506 		strncpy(chap_rec->username, chap_table->name,
507 			ISCSI_CHAP_AUTH_NAME_MAX_LEN);
508 		strncpy(chap_rec->password, chap_table->secret,
509 			QL4_CHAP_MAX_SECRET_LEN);
510 		chap_rec->password_length = chap_table->secret_len;
511 
512 		if (chap_table->flags & BIT_7) /* local */
513 			chap_rec->chap_type = CHAP_TYPE_OUT;
514 
515 		if (chap_table->flags & BIT_6) /* peer */
516 			chap_rec->chap_type = CHAP_TYPE_IN;
517 
518 		chap_rec++;
519 
520 		valid_chap_entries++;
521 		if (valid_chap_entries == *num_entries)
522 			break;
523 		else
524 			continue;
525 	}
526 	mutex_unlock(&ha->chap_sem);
527 
528 exit_get_chap_list:
529 	ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
530 			__func__,  valid_chap_entries);
531 	*num_entries = valid_chap_entries;
532 	return ret;
533 }
534 
535 static int __qla4xxx_is_chap_active(struct device *dev, void *data)
536 {
537 	int ret = 0;
538 	uint16_t *chap_tbl_idx = (uint16_t *) data;
539 	struct iscsi_cls_session *cls_session;
540 	struct iscsi_session *sess;
541 	struct ddb_entry *ddb_entry;
542 
543 	if (!iscsi_is_session_dev(dev))
544 		goto exit_is_chap_active;
545 
546 	cls_session = iscsi_dev_to_session(dev);
547 	sess = cls_session->dd_data;
548 	ddb_entry = sess->dd_data;
549 
550 	if (iscsi_session_chkready(cls_session))
551 		goto exit_is_chap_active;
552 
553 	if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
554 		ret = 1;
555 
556 exit_is_chap_active:
557 	return ret;
558 }
559 
560 static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
561 				  uint16_t chap_tbl_idx)
562 {
563 	int ret = 0;
564 
565 	ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
566 				    __qla4xxx_is_chap_active);
567 
568 	return ret;
569 }
570 
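/*
 * qla4xxx_delete_chap - invalidate a CHAP entry in flash by writing a
 * 0xFFFF cookie, provided the entry is not referenced by an active
 * session. The cached chap_list is updated on success.
 */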
571 static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
572 {
573 	struct scsi_qla_host *ha = to_qla_host(shost);
574 	struct ql4_chap_table *chap_table;
575 	dma_addr_t chap_dma;
576 	int max_chap_entries = 0;
577 	uint32_t offset = 0;
578 	uint32_t chap_size;
579 	int ret = 0;
580 
581 	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
582 	if (chap_table == NULL)
583 		return -ENOMEM;
584 
585 	memset(chap_table, 0, sizeof(struct ql4_chap_table));
586 
587 	if (is_qla80XX(ha))
588 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
589 				   sizeof(struct ql4_chap_table);
590 	else
591 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
592 
593 	if (chap_tbl_idx > max_chap_entries) {
594 		ret = -EINVAL;
595 		goto exit_delete_chap;
596 	}
597 
598 	/* Check if chap index is in use.
599 	 * If chap is in use, don't delete the chap entry */
600 	ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
601 	if (ret) {
602 		ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
603 			   "delete from flash\n", chap_tbl_idx);
604 		ret = -EBUSY;
605 		goto exit_delete_chap;
606 	}
607 
608 	chap_size = sizeof(struct ql4_chap_table);
609 	if (is_qla40XX(ha))
610 		offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
611 	else {
612 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
613 		/* flt_chap_size is CHAP table size for both ports
614 		 * so divide it by 2 to calculate the offset for second port
615 		 */
616 		if (ha->port_num == 1)
617 			offset += (ha->hw.flt_chap_size / 2);
618 		offset += (chap_tbl_idx * chap_size);
619 	}
620 
621 	ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
622 	if (ret != QLA_SUCCESS) {
623 		ret = -EINVAL;
624 		goto exit_delete_chap;
625 	}
626 
627 	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n",
628 			  __le16_to_cpu(chap_table->cookie)));
629 
630 	if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
631 		ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
632 		goto exit_delete_chap;
633 	}
634 
635 	chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
636 
637 	offset = FLASH_CHAP_OFFSET |
638 			(chap_tbl_idx * sizeof(struct ql4_chap_table));
639 	ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
640 				FLASH_OPT_RMW_COMMIT);
641 	if (ret == QLA_SUCCESS && ha->chap_list) {
642 		mutex_lock(&ha->chap_sem);
643 		/* Update ha chap_list cache */
644 		memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
645 			chap_table, sizeof(struct ql4_chap_table));
646 		mutex_unlock(&ha->chap_sem);
647 	}
648 	if (ret != QLA_SUCCESS)
649 		ret =  -EINVAL;
650 
651 exit_delete_chap:
652 	dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
653 	return ret;
654 }
655 
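/*
 * qla4xxx_get_iface_param - report an ISCSI_NET_PARAM value for the
 * given iface from the driver's cached ip_config. Returns the number
 * of bytes written to buf, or -ENOSYS for unsupported parameters.
 */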
656 static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
657 				   enum iscsi_param_type param_type,
658 				   int param, char *buf)
659 {
660 	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
661 	struct scsi_qla_host *ha = to_qla_host(shost);
662 	int len = -ENOSYS;
663 
664 	if (param_type != ISCSI_NET_PARAM)
665 		return -ENOSYS;
666 
667 	switch (param) {
668 	case ISCSI_NET_PARAM_IPV4_ADDR:
669 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
670 		break;
671 	case ISCSI_NET_PARAM_IPV4_SUBNET:
672 		len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
673 		break;
674 	case ISCSI_NET_PARAM_IPV4_GW:
675 		len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
676 		break;
677 	case ISCSI_NET_PARAM_IFACE_ENABLE:
678 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
679 			len = sprintf(buf, "%s\n",
680 				      (ha->ip_config.ipv4_options &
681 				       IPOPT_IPV4_PROTOCOL_ENABLE) ?
682 				      "enabled" : "disabled");
683 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
684 			len = sprintf(buf, "%s\n",
685 				      (ha->ip_config.ipv6_options &
686 				       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
687 				       "enabled" : "disabled");
688 		break;
689 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
690 		len = sprintf(buf, "%s\n",
691 			      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
692 			      "dhcp" : "static");
693 		break;
694 	case ISCSI_NET_PARAM_IPV6_ADDR:
695 		if (iface->iface_num == 0)
696 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
697 		if (iface->iface_num == 1)
698 			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
699 		break;
700 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
701 		len = sprintf(buf, "%pI6\n",
702 			      &ha->ip_config.ipv6_link_local_addr);
703 		break;
704 	case ISCSI_NET_PARAM_IPV6_ROUTER:
705 		len = sprintf(buf, "%pI6\n",
706 			      &ha->ip_config.ipv6_default_router_addr);
707 		break;
708 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
709 		len = sprintf(buf, "%s\n",
710 			      (ha->ip_config.ipv6_addl_options &
711 			       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
712 			       "nd" : "static");
713 		break;
714 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
715 		len = sprintf(buf, "%s\n",
716 			      (ha->ip_config.ipv6_addl_options &
717 			       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
718 			       "auto" : "static");
719 		break;
720 	case ISCSI_NET_PARAM_VLAN_ID:
721 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
722 			len = sprintf(buf, "%d\n",
723 				      (ha->ip_config.ipv4_vlan_tag &
724 				       ISCSI_MAX_VLAN_ID));
725 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
726 			len = sprintf(buf, "%d\n",
727 				      (ha->ip_config.ipv6_vlan_tag &
728 				       ISCSI_MAX_VLAN_ID));
729 		break;
730 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
731 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
732 			len = sprintf(buf, "%d\n",
733 				      ((ha->ip_config.ipv4_vlan_tag >> 13) &
734 					ISCSI_MAX_VLAN_PRIORITY));
735 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
736 			len = sprintf(buf, "%d\n",
737 				      ((ha->ip_config.ipv6_vlan_tag >> 13) &
738 					ISCSI_MAX_VLAN_PRIORITY));
739 		break;
740 	case ISCSI_NET_PARAM_VLAN_ENABLED:
741 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
742 			len = sprintf(buf, "%s\n",
743 				      (ha->ip_config.ipv4_options &
744 				       IPOPT_VLAN_TAGGING_ENABLE) ?
745 				       "enabled" : "disabled");
746 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
747 			len = sprintf(buf, "%s\n",
748 				      (ha->ip_config.ipv6_options &
749 				       IPV6_OPT_VLAN_TAGGING_ENABLE) ?
750 				       "enabled" : "disabled");
751 		break;
752 	case ISCSI_NET_PARAM_MTU:
753 		len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
754 		break;
755 	case ISCSI_NET_PARAM_PORT:
756 		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
757 			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
758 		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
759 			len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
760 		break;
761 	default:
762 		len = -ENOSYS;
763 	}
764 
765 	return len;
766 }
767 
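/*
 * qla4xxx_ep_connect - allocate an iscsi_endpoint and cache the IPv4
 * or IPv6 destination address in its qla_endpoint private data. No
 * connection is initiated here; the endpoint only records the target
 * address for later session setup.
 */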
768 static struct iscsi_endpoint *
769 qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
770 		   int non_blocking)
771 {
772 	int ret;
773 	struct iscsi_endpoint *ep;
774 	struct qla_endpoint *qla_ep;
775 	struct scsi_qla_host *ha;
776 	struct sockaddr_in *addr;
777 	struct sockaddr_in6 *addr6;
778 
779 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
780 	if (!shost) {
781 		ret = -ENXIO;
782 		printk(KERN_ERR "%s: shost is NULL\n",
783 		       __func__);
784 		return ERR_PTR(ret);
785 	}
786 
787 	ha = iscsi_host_priv(shost);
788 
789 	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
790 	if (!ep) {
791 		ret = -ENOMEM;
792 		return ERR_PTR(ret);
793 	}
794 
795 	qla_ep = ep->dd_data;
796 	memset(qla_ep, 0, sizeof(struct qla_endpoint));
797 	if (dst_addr->sa_family == AF_INET) {
798 		memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
799 		addr = (struct sockaddr_in *)&qla_ep->dst_addr;
800 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
801 				  (char *)&addr->sin_addr));
802 	} else if (dst_addr->sa_family == AF_INET6) {
803 		memcpy(&qla_ep->dst_addr, dst_addr,
804 		       sizeof(struct sockaddr_in6));
805 		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
806 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
807 				  (char *)&addr6->sin6_addr));
808 	}
809 
810 	qla_ep->host = shost;
811 
812 	return ep;
813 }
814 
815 static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
816 {
817 	struct qla_endpoint *qla_ep;
818 	struct scsi_qla_host *ha;
819 	int ret = 0;
820 
821 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
822 	qla_ep = ep->dd_data;
823 	ha = to_qla_host(qla_ep->host);
824 
825 	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
826 		ret = 1;
827 
828 	return ret;
829 }
830 
831 static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
832 {
833 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
834 	iscsi_destroy_endpoint(ep);
835 }
836 
837 static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
838 				enum iscsi_param param,
839 				char *buf)
840 {
841 	struct qla_endpoint *qla_ep = ep->dd_data;
842 	struct sockaddr *dst_addr;
843 
844 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
845 
846 	switch (param) {
847 	case ISCSI_PARAM_CONN_PORT:
848 	case ISCSI_PARAM_CONN_ADDRESS:
849 		if (!qla_ep)
850 			return -ENOTCONN;
851 
852 		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
853 		if (!dst_addr)
854 			return -ENOTCONN;
855 
856 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
857 						 &qla_ep->dst_addr, param, buf);
858 	default:
859 		return -ENOSYS;
860 	}
861 }
862 
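/*
 * qla4xxx_conn_get_stats - fetch per-DDB iSCSI statistics from the
 * firmware into a DMA buffer and translate them into the transport's
 * struct iscsi_stats (octet counts plus tx/rx PDU counters).
 */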
863 static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
864 				   struct iscsi_stats *stats)
865 {
866 	struct iscsi_session *sess;
867 	struct iscsi_cls_session *cls_sess;
868 	struct ddb_entry *ddb_entry;
869 	struct scsi_qla_host *ha;
870 	struct ql_iscsi_stats *ql_iscsi_stats;
871 	int stats_size;
872 	int ret;
873 	dma_addr_t iscsi_stats_dma;
874 
875 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
876 
877 	cls_sess = iscsi_conn_to_session(cls_conn);
878 	sess = cls_sess->dd_data;
879 	ddb_entry = sess->dd_data;
880 	ha = ddb_entry->ha;
881 
882 	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
883 	/* Allocate memory */
884 	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
885 					    &iscsi_stats_dma, GFP_KERNEL);
886 	if (!ql_iscsi_stats) {
887 		ql4_printk(KERN_ERR, ha,
888 			   "Unable to allocate memory for iscsi stats\n");
889 		goto exit_get_stats;
890 	}
891 
892 	ret =  qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
893 				     iscsi_stats_dma);
894 	if (ret != QLA_SUCCESS) {
895 		ql4_printk(KERN_ERR, ha,
896 			   "Unable to retrieve iscsi stats\n");
897 		goto free_stats;
898 	}
899 
900 	/* octets */
901 	stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
902 	stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
903 	/* xmit pdus */
904 	stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
905 	stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
906 	stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
907 	stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
908 	stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
909 	stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
910 	stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
911 	stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
912 	/* recv pdus */
913 	stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
914 	stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
915 	stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
916 	stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
917 	stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
918 	stats->logoutrsp_pdus =
919 			le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
920 	stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
921 	stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
922 	stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
923 
924 free_stats:
925 	dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
926 			  iscsi_stats_dma);
927 exit_get_stats:
928 	return;
929 }
930 
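/*
 * qla4xxx_eh_cmd_timed_out - SCSI command timeout hook. If the session
 * is in ISCSI_SESSION_FAILED state the block layer timer is restarted
 * (BLK_EH_RESET_TIMER) so session recovery gets a chance to complete
 * before error handling escalates.
 */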
931 static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
932 {
933 	struct iscsi_cls_session *session;
934 	struct iscsi_session *sess;
935 	unsigned long flags;
936 	enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
937 
938 	session = starget_to_session(scsi_target(sc->device));
939 	sess = session->dd_data;
940 
941 	spin_lock_irqsave(&session->lock, flags);
942 	if (session->state == ISCSI_SESSION_FAILED)
943 		ret = BLK_EH_RESET_TIMER;
944 	spin_unlock_irqrestore(&session->lock, flags);
945 
946 	return ret;
947 }
948 
949 static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
950 {
951 	struct scsi_qla_host *ha = to_qla_host(shost);
952 	struct iscsi_cls_host *ihost = shost->shost_data;
953 	uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
954 
955 	qla4xxx_get_firmware_state(ha);
956 
957 	switch (ha->addl_fw_state & 0x0F00) {
958 	case FW_ADDSTATE_LINK_SPEED_10MBPS:
959 		speed = ISCSI_PORT_SPEED_10MBPS;
960 		break;
961 	case FW_ADDSTATE_LINK_SPEED_100MBPS:
962 		speed = ISCSI_PORT_SPEED_100MBPS;
963 		break;
964 	case FW_ADDSTATE_LINK_SPEED_1GBPS:
965 		speed = ISCSI_PORT_SPEED_1GBPS;
966 		break;
967 	case FW_ADDSTATE_LINK_SPEED_10GBPS:
968 		speed = ISCSI_PORT_SPEED_10GBPS;
969 		break;
970 	}
971 	ihost->port_speed = speed;
972 }
973 
974 static void qla4xxx_set_port_state(struct Scsi_Host *shost)
975 {
976 	struct scsi_qla_host *ha = to_qla_host(shost);
977 	struct iscsi_cls_host *ihost = shost->shost_data;
978 	uint32_t state = ISCSI_PORT_STATE_DOWN;
979 
980 	if (test_bit(AF_LINK_UP, &ha->flags))
981 		state = ISCSI_PORT_STATE_UP;
982 
983 	ihost->port_state = state;
984 }
985 
986 static int qla4xxx_host_get_param(struct Scsi_Host *shost,
987 				  enum iscsi_host_param param, char *buf)
988 {
989 	struct scsi_qla_host *ha = to_qla_host(shost);
990 	int len;
991 
992 	switch (param) {
993 	case ISCSI_HOST_PARAM_HWADDRESS:
994 		len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
995 		break;
996 	case ISCSI_HOST_PARAM_IPADDRESS:
997 		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
998 		break;
999 	case ISCSI_HOST_PARAM_INITIATOR_NAME:
1000 		len = sprintf(buf, "%s\n", ha->name_string);
1001 		break;
1002 	case ISCSI_HOST_PARAM_PORT_STATE:
1003 		qla4xxx_set_port_state(shost);
1004 		len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
1005 		break;
1006 	case ISCSI_HOST_PARAM_PORT_SPEED:
1007 		qla4xxx_set_port_speed(shost);
1008 		len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
1009 		break;
1010 	default:
1011 		return -ENOSYS;
1012 	}
1013 
1014 	return len;
1015 }
1016 
1017 static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
1018 {
1019 	if (ha->iface_ipv4)
1020 		return;
1021 
1022 	/* IPv4 */
1023 	ha->iface_ipv4 = iscsi_create_iface(ha->host,
1024 					    &qla4xxx_iscsi_transport,
1025 					    ISCSI_IFACE_TYPE_IPV4, 0, 0);
1026 	if (!ha->iface_ipv4)
1027 		ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
1028 			   "iface0.\n");
1029 }
1030 
1031 static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
1032 {
1033 	if (!ha->iface_ipv6_0)
1034 		/* IPv6 iface-0 */
1035 		ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
1036 						      &qla4xxx_iscsi_transport,
1037 						      ISCSI_IFACE_TYPE_IPV6, 0,
1038 						      0);
1039 	if (!ha->iface_ipv6_0)
1040 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1041 			   "iface0.\n");
1042 
1043 	if (!ha->iface_ipv6_1)
1044 		/* IPv6 iface-1 */
1045 		ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
1046 						      &qla4xxx_iscsi_transport,
1047 						      ISCSI_IFACE_TYPE_IPV6, 1,
1048 						      0);
1049 	if (!ha->iface_ipv6_1)
1050 		ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
1051 			   "iface1.\n");
1052 }
1053 
1054 static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
1055 {
1056 	if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
1057 		qla4xxx_create_ipv4_iface(ha);
1058 
1059 	if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
1060 		qla4xxx_create_ipv6_iface(ha);
1061 }
1062 
1063 static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
1064 {
1065 	if (ha->iface_ipv4) {
1066 		iscsi_destroy_iface(ha->iface_ipv4);
1067 		ha->iface_ipv4 = NULL;
1068 	}
1069 }
1070 
1071 static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
1072 {
1073 	if (ha->iface_ipv6_0) {
1074 		iscsi_destroy_iface(ha->iface_ipv6_0);
1075 		ha->iface_ipv6_0 = NULL;
1076 	}
1077 	if (ha->iface_ipv6_1) {
1078 		iscsi_destroy_iface(ha->iface_ipv6_1);
1079 		ha->iface_ipv6_1 = NULL;
1080 	}
1081 }
1082 
1083 static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
1084 {
1085 	qla4xxx_destroy_ipv4_iface(ha);
1086 	qla4xxx_destroy_ipv6_iface(ha);
1087 }
1088 
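/*
 * qla4xxx_set_ipv6 - apply a single IPv6 network parameter from
 * iface_param to the firmware address control block (init_fw_cb).
 */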
1089 static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
1090 			     struct iscsi_iface_param_info *iface_param,
1091 			     struct addr_ctrl_blk *init_fw_cb)
1092 {
1093 	/*
1094 	 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
1095 	 * iface_num 1 is valid only for IPv6 Addr.
1096 	 */
1097 	switch (iface_param->param) {
1098 	case ISCSI_NET_PARAM_IPV6_ADDR:
1099 		if (iface_param->iface_num & 0x1)
1100 			/* IPv6 Addr 1 */
1101 			memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
1102 			       sizeof(init_fw_cb->ipv6_addr1));
1103 		else
1104 			/* IPv6 Addr 0 */
1105 			memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
1106 			       sizeof(init_fw_cb->ipv6_addr0));
1107 		break;
1108 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
1109 		if (iface_param->iface_num & 0x1)
1110 			break;
1111 		memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
1112 		       sizeof(init_fw_cb->ipv6_if_id));
1113 		break;
1114 	case ISCSI_NET_PARAM_IPV6_ROUTER:
1115 		if (iface_param->iface_num & 0x1)
1116 			break;
1117 		memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
1118 		       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1119 		break;
1120 	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
1121 		/* Autocfg applies to even interface */
1122 		if (iface_param->iface_num & 0x1)
1123 			break;
1124 
1125 		if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
1126 			init_fw_cb->ipv6_addtl_opts &=
1127 				cpu_to_le16(
1128 				  ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1129 		else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
1130 			init_fw_cb->ipv6_addtl_opts |=
1131 				cpu_to_le16(
1132 				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
1133 		else
1134 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1135 				   "IPv6 addr\n");
1136 		break;
1137 	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
1138 		/* Autocfg applies to even interface */
1139 		if (iface_param->iface_num & 0x1)
1140 			break;
1141 
1142 		if (iface_param->value[0] ==
1143 		    ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
1144 			init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
1145 					IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1146 		else if (iface_param->value[0] ==
1147 			 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
1148 			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
1149 				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
1150 		else
1151 			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
1152 				   "IPv6 linklocal addr\n");
1153 		break;
1154 	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
1155 		/* Autocfg applies to even interface */
1156 		if (iface_param->iface_num & 0x1)
1157 			break;
1158 
1159 		if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
1160 			memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
1161 			       sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
1162 		break;
1163 	case ISCSI_NET_PARAM_IFACE_ENABLE:
1164 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1165 			init_fw_cb->ipv6_opts |=
1166 				cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
1167 			qla4xxx_create_ipv6_iface(ha);
1168 		} else {
1169 			init_fw_cb->ipv6_opts &=
1170 				cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
1171 					    0xFFFF);
1172 			qla4xxx_destroy_ipv6_iface(ha);
1173 		}
1174 		break;
1175 	case ISCSI_NET_PARAM_VLAN_TAG:
1176 		if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
1177 			break;
1178 		init_fw_cb->ipv6_vlan_tag =
1179 				cpu_to_be16(*(uint16_t *)iface_param->value);
1180 		break;
1181 	case ISCSI_NET_PARAM_VLAN_ENABLED:
1182 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1183 			init_fw_cb->ipv6_opts |=
1184 				cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
1185 		else
1186 			init_fw_cb->ipv6_opts &=
1187 				cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
1188 		break;
1189 	case ISCSI_NET_PARAM_MTU:
1190 		init_fw_cb->eth_mtu_size =
1191 				cpu_to_le16(*(uint16_t *)iface_param->value);
1192 		break;
1193 	case ISCSI_NET_PARAM_PORT:
1194 		/* Autocfg applies to even interface */
1195 		if (iface_param->iface_num & 0x1)
1196 			break;
1197 
1198 		init_fw_cb->ipv6_port =
1199 				cpu_to_le16(*(uint16_t *)iface_param->value);
1200 		break;
1201 	default:
1202 		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
1203 			   iface_param->param);
1204 		break;
1205 	}
1206 }
1207 
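/*
 * qla4xxx_set_ipv4 - apply a single IPv4 network parameter from
 * iface_param to the firmware address control block (init_fw_cb).
 */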
1208 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
1209 			     struct iscsi_iface_param_info *iface_param,
1210 			     struct addr_ctrl_blk *init_fw_cb)
1211 {
1212 	switch (iface_param->param) {
1213 	case ISCSI_NET_PARAM_IPV4_ADDR:
1214 		memcpy(init_fw_cb->ipv4_addr, iface_param->value,
1215 		       sizeof(init_fw_cb->ipv4_addr));
1216 		break;
1217 	case ISCSI_NET_PARAM_IPV4_SUBNET:
1218 		memcpy(init_fw_cb->ipv4_subnet,	iface_param->value,
1219 		       sizeof(init_fw_cb->ipv4_subnet));
1220 		break;
1221 	case ISCSI_NET_PARAM_IPV4_GW:
1222 		memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
1223 		       sizeof(init_fw_cb->ipv4_gw_addr));
1224 		break;
1225 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1226 		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
1227 			init_fw_cb->ipv4_tcp_opts |=
1228 					cpu_to_le16(TCPOPT_DHCP_ENABLE);
1229 		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
1230 			init_fw_cb->ipv4_tcp_opts &=
1231 					cpu_to_le16(~TCPOPT_DHCP_ENABLE);
1232 		else
1233 			ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
1234 		break;
1235 	case ISCSI_NET_PARAM_IFACE_ENABLE:
1236 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
1237 			init_fw_cb->ipv4_ip_opts |=
1238 				cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
1239 			qla4xxx_create_ipv4_iface(ha);
1240 		} else {
1241 			init_fw_cb->ipv4_ip_opts &=
1242 				cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
1243 					    0xFFFF);
1244 			qla4xxx_destroy_ipv4_iface(ha);
1245 		}
1246 		break;
1247 	case ISCSI_NET_PARAM_VLAN_TAG:
1248 		if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
1249 			break;
1250 		init_fw_cb->ipv4_vlan_tag =
1251 				cpu_to_be16(*(uint16_t *)iface_param->value);
1252 		break;
1253 	case ISCSI_NET_PARAM_VLAN_ENABLED:
1254 		if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
1255 			init_fw_cb->ipv4_ip_opts |=
1256 					cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
1257 		else
1258 			init_fw_cb->ipv4_ip_opts &=
1259 					cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
1260 		break;
1261 	case ISCSI_NET_PARAM_MTU:
1262 		init_fw_cb->eth_mtu_size =
1263 				cpu_to_le16(*(uint16_t *)iface_param->value);
1264 		break;
1265 	case ISCSI_NET_PARAM_PORT:
1266 		init_fw_cb->ipv4_port =
1267 				cpu_to_le16(*(uint16_t *)iface_param->value);
1268 		break;
1269 	default:
1270 		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
1271 			   iface_param->param);
1272 		break;
1273 	}
1274 }
1275 
1276 static void
1277 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
1278 {
1279 	struct addr_ctrl_blk_def *acb;
1280 	acb = (struct addr_ctrl_blk_def *)init_fw_cb;
1281 	memset(acb->reserved1, 0, sizeof(acb->reserved1));
1282 	memset(acb->reserved2, 0, sizeof(acb->reserved2));
1283 	memset(acb->reserved3, 0, sizeof(acb->reserved3));
1284 	memset(acb->reserved4, 0, sizeof(acb->reserved4));
1285 	memset(acb->reserved5, 0, sizeof(acb->reserved5));
1286 	memset(acb->reserved6, 0, sizeof(acb->reserved6));
1287 	memset(acb->reserved7, 0, sizeof(acb->reserved7));
1288 	memset(acb->reserved8, 0, sizeof(acb->reserved8));
1289 	memset(acb->reserved9, 0, sizeof(acb->reserved9));
1290 	memset(acb->reserved10, 0, sizeof(acb->reserved10));
1291 	memset(acb->reserved11, 0, sizeof(acb->reserved11));
1292 	memset(acb->reserved12, 0, sizeof(acb->reserved12));
1293 	memset(acb->reserved13, 0, sizeof(acb->reserved13));
1294 	memset(acb->reserved14, 0, sizeof(acb->reserved14));
1295 	memset(acb->reserved15, 0, sizeof(acb->reserved15));
1296 }
1297 
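/*
 * qla4xxx_iface_set_param - apply iface settings passed down through
 * the iSCSI transport netlink interface: read the current IFCB from
 * the firmware, fold in each ISCSI_NET_PARAM attribute, commit the
 * block to flash, then disable the ACB and set it again so the new
 * settings take effect.
 */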
1298 static int
1299 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
1300 {
1301 	struct scsi_qla_host *ha = to_qla_host(shost);
1302 	int rval = 0;
1303 	struct iscsi_iface_param_info *iface_param = NULL;
1304 	struct addr_ctrl_blk *init_fw_cb = NULL;
1305 	dma_addr_t init_fw_cb_dma;
1306 	uint32_t mbox_cmd[MBOX_REG_COUNT];
1307 	uint32_t mbox_sts[MBOX_REG_COUNT];
1308 	uint32_t rem = len;
1309 	struct nlattr *attr;
1310 
1311 	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
1312 					sizeof(struct addr_ctrl_blk),
1313 					&init_fw_cb_dma, GFP_KERNEL);
1314 	if (!init_fw_cb) {
1315 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
1316 			   __func__);
1317 		return -ENOMEM;
1318 	}
1319 
1320 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1321 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1322 	memset(&mbox_sts, 0, sizeof(mbox_sts));
1323 
1324 	if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
1325 		ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
1326 		rval = -EIO;
1327 		goto exit_init_fw_cb;
1328 	}
1329 
1330 	nla_for_each_attr(attr, data, len, rem) {
1331 		iface_param = nla_data(attr);
1332 
1333 		if (iface_param->param_type != ISCSI_NET_PARAM)
1334 			continue;
1335 
1336 		switch (iface_param->iface_type) {
1337 		case ISCSI_IFACE_TYPE_IPV4:
1338 			switch (iface_param->iface_num) {
1339 			case 0:
1340 				qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
1341 				break;
1342 			default:
1343 				/* Cannot have more than one IPv4 interface */
1344 				ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
1345 					   "number = %d\n",
1346 					   iface_param->iface_num);
1347 				break;
1348 			}
1349 			break;
1350 		case ISCSI_IFACE_TYPE_IPV6:
1351 			switch (iface_param->iface_num) {
1352 			case 0:
1353 			case 1:
1354 				qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
1355 				break;
1356 			default:
1357 				/* Cannot have more than two IPv6 interfaces */
1358 				ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
1359 					   "number = %d\n",
1360 					   iface_param->iface_num);
1361 				break;
1362 			}
1363 			break;
1364 		default:
1365 			ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
1366 			break;
1367 		}
1368 	}
1369 
1370 	init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
1371 
1372 	rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
1373 				 sizeof(struct addr_ctrl_blk),
1374 				 FLASH_OPT_RMW_COMMIT);
1375 	if (rval != QLA_SUCCESS) {
1376 		ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
1377 			   __func__);
1378 		rval = -EIO;
1379 		goto exit_init_fw_cb;
1380 	}
1381 
1382 	rval = qla4xxx_disable_acb(ha);
1383 	if (rval != QLA_SUCCESS) {
1384 		ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
1385 			   __func__);
1386 		rval = -EIO;
1387 		goto exit_init_fw_cb;
1388 	}
1389 
1390 	wait_for_completion_timeout(&ha->disable_acb_comp,
1391 				    DISABLE_ACB_TOV * HZ);
1392 
1393 	qla4xxx_initcb_to_acb(init_fw_cb);
1394 
1395 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
1396 	if (rval != QLA_SUCCESS) {
1397 		ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
1398 			   __func__);
1399 		rval = -EIO;
1400 		goto exit_init_fw_cb;
1401 	}
1402 
1403 	memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
1404 	qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
1405 				  init_fw_cb_dma);
1406 
1407 exit_init_fw_cb:
1408 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
1409 			  init_fw_cb, init_fw_cb_dma);
1410 
1411 	return rval;
1412 }
1413 
1414 static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1415 				     enum iscsi_param param, char *buf)
1416 {
1417 	struct iscsi_session *sess = cls_sess->dd_data;
1418 	struct ddb_entry *ddb_entry = sess->dd_data;
1419 	struct scsi_qla_host *ha = ddb_entry->ha;
1420 	int rval, len;
1421 	uint16_t idx;
1422 
1423 	switch (param) {
1424 	case ISCSI_PARAM_CHAP_IN_IDX:
1425 		rval = qla4xxx_get_chap_index(ha, sess->username_in,
1426 					      sess->password_in, BIDI_CHAP,
1427 					      &idx);
1428 		if (rval)
1429 			len = sprintf(buf, "\n");
1430 		else
1431 			len = sprintf(buf, "%hu\n", idx);
1432 		break;
1433 	case ISCSI_PARAM_CHAP_OUT_IDX:
1434 		rval = qla4xxx_get_chap_index(ha, sess->username,
1435 					      sess->password, LOCAL_CHAP,
1436 					      &idx);
1437 		if (rval)
1438 			len = sprintf(buf, "\n");
1439 		else
1440 			len = sprintf(buf, "%hu\n", idx);
1441 		break;
1442 	default:
1443 		return iscsi_session_get_param(cls_sess, param, buf);
1444 	}
1445 
1446 	return len;
1447 }
1448 
1449 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1450 				  enum iscsi_param param, char *buf)
1451 {
1452 	struct iscsi_conn *conn;
1453 	struct qla_conn *qla_conn;
1454 	struct sockaddr *dst_addr;
1455 	int len = 0;
1456 
1457 	conn = cls_conn->dd_data;
1458 	qla_conn = conn->dd_data;
1459 	dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1460 
1461 	switch (param) {
1462 	case ISCSI_PARAM_CONN_PORT:
1463 	case ISCSI_PARAM_CONN_ADDRESS:
1464 		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
1465 						 dst_addr, param, buf);
1466 	default:
1467 		return iscsi_conn_get_param(cls_conn, param, buf);
1468 	}
1469 
1470 	return len;
1471 
1472 }
1473 
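/*
 * qla4xxx_get_ddb_index - find and reserve a free device database (DDB)
 * index in ha->ddb_idx_map and request it from the firmware, retrying
 * with the next free index if the firmware rejects it with
 * MBOX_STS_COMMAND_ERROR.
 */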
1474 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
1475 {
1476 	uint32_t mbx_sts = 0;
1477 	uint16_t tmp_ddb_index;
1478 	int ret;
1479 
1480 get_ddb_index:
1481 	tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1482 
1483 	if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
1484 		DEBUG2(ql4_printk(KERN_INFO, ha,
1485 				  "Free DDB index not available\n"));
1486 		ret = QLA_ERROR;
1487 		goto exit_get_ddb_index;
1488 	}
1489 
1490 	if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
1491 		goto get_ddb_index;
1492 
1493 	DEBUG2(ql4_printk(KERN_INFO, ha,
1494 			  "Found a free DDB index at %d\n", tmp_ddb_index));
1495 	ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
1496 	if (ret == QLA_ERROR) {
1497 		if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1498 			ql4_printk(KERN_INFO, ha,
1499 				   "DDB index = %d not available trying next\n",
1500 				   tmp_ddb_index);
1501 			goto get_ddb_index;
1502 		}
1503 		DEBUG2(ql4_printk(KERN_INFO, ha,
1504 				  "Free FW DDB not available\n"));
1505 	}
1506 
1507 	*ddb_index = tmp_ddb_index;
1508 
1509 exit_get_ddb_index:
1510 	return ret;
1511 }
1512 
1513 static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
1514 				   struct ddb_entry *ddb_entry,
1515 				   char *existing_ipaddr,
1516 				   char *user_ipaddr)
1517 {
1518 	uint8_t dst_ipaddr[IPv6_ADDR_LEN];
1519 	char formatted_ipaddr[DDB_IPADDR_LEN];
1520 	int status = QLA_SUCCESS, ret = 0;
1521 
1522 	if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
1523 		ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1524 			       '\0', NULL);
1525 		if (ret == 0) {
1526 			status = QLA_ERROR;
1527 			goto out_match;
1528 		}
1529 		ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
1530 	} else {
1531 		ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
1532 			       '\0', NULL);
1533 		if (ret == 0) {
1534 			status = QLA_ERROR;
1535 			goto out_match;
1536 		}
1537 		ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
1538 	}
1539 
1540 	if (strcmp(existing_ipaddr, formatted_ipaddr))
1541 		status = QLA_ERROR;
1542 
1543 out_match:
1544 	return status;
1545 }
1546 
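/*
 * qla4xxx_match_fwdb_session - check whether the session being created
 * duplicates an existing flash DDB session by comparing target IQN,
 * persistent address and persistent port. Returns QLA_SUCCESS on a
 * match, QLA_ERROR otherwise.
 */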
1547 static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
1548 				      struct iscsi_cls_conn *cls_conn)
1549 {
1550 	int idx = 0, max_ddbs, rval;
1551 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1552 	struct iscsi_session *sess, *existing_sess;
1553 	struct iscsi_conn *conn, *existing_conn;
1554 	struct ddb_entry *ddb_entry;
1555 
1556 	sess = cls_sess->dd_data;
1557 	conn = cls_conn->dd_data;
1558 
1559 	if (sess->targetname == NULL ||
1560 	    conn->persistent_address == NULL ||
1561 	    conn->persistent_port == 0)
1562 		return QLA_ERROR;
1563 
1564 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
1565 				     MAX_DEV_DB_ENTRIES;
1566 
1567 	for (idx = 0; idx < max_ddbs; idx++) {
1568 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
1569 		if (ddb_entry == NULL)
1570 			continue;
1571 
1572 		if (ddb_entry->ddb_type != FLASH_DDB)
1573 			continue;
1574 
1575 		existing_sess = ddb_entry->sess->dd_data;
1576 		existing_conn = ddb_entry->conn->dd_data;
1577 
1578 		if (existing_sess->targetname == NULL ||
1579 		    existing_conn->persistent_address == NULL ||
1580 		    existing_conn->persistent_port == 0)
1581 			continue;
1582 
1583 		DEBUG2(ql4_printk(KERN_INFO, ha,
1584 				  "IQN = %s User IQN = %s\n",
1585 				  existing_sess->targetname,
1586 				  sess->targetname));
1587 
1588 		DEBUG2(ql4_printk(KERN_INFO, ha,
1589 				  "IP = %s User IP = %s\n",
1590 				  existing_conn->persistent_address,
1591 				  conn->persistent_address));
1592 
1593 		DEBUG2(ql4_printk(KERN_INFO, ha,
1594 				  "Port = %d User Port = %d\n",
1595 				  existing_conn->persistent_port,
1596 				  conn->persistent_port));
1597 
1598 		if (strcmp(existing_sess->targetname, sess->targetname))
1599 			continue;
1600 		rval = qla4xxx_match_ipaddress(ha, ddb_entry,
1601 					existing_conn->persistent_address,
1602 					conn->persistent_address);
1603 		if (rval == QLA_ERROR)
1604 			continue;
1605 		if (existing_conn->persistent_port != conn->persistent_port)
1606 			continue;
1607 		break;
1608 	}
1609 
1610 	if (idx == max_ddbs)
1611 		return QLA_ERROR;
1612 
1613 	DEBUG2(ql4_printk(KERN_INFO, ha,
1614 			  "Match found in fwdb sessions\n"));
1615 	return QLA_SUCCESS;
1616 }
1617 
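/**
 * qla4xxx_session_create - create a new iSCSI session for this adapter
 * @ep: Pointer to the endpoint the session will use
 * @cmds_max: Maximum number of commands for the session
 * @qdepth: Requested queue depth
 * @initial_cmdsn: Initial CmdSN for the session
 *
 * Reserves a free DDB index, sets up the iSCSI class session and
 * initializes the driver's ddb_entry bookkeeping for it.
 *
 * Returns the new session on success, NULL on failure.
 **/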
1618 static struct iscsi_cls_session *
1619 qla4xxx_session_create(struct iscsi_endpoint *ep,
1620 			uint16_t cmds_max, uint16_t qdepth,
1621 			uint32_t initial_cmdsn)
1622 {
1623 	struct iscsi_cls_session *cls_sess;
1624 	struct scsi_qla_host *ha;
1625 	struct qla_endpoint *qla_ep;
1626 	struct ddb_entry *ddb_entry;
1627 	uint16_t ddb_index;
1628 	struct iscsi_session *sess;
1629 	struct sockaddr *dst_addr;
1630 	int ret;
1631 
1632 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1633 	if (!ep) {
1634 		printk(KERN_ERR "qla4xxx: missing ep.\n");
1635 		return NULL;
1636 	}
1637 
1638 	qla_ep = ep->dd_data;
1639 	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1640 	ha = to_qla_host(qla_ep->host);
1641 
1642 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
1643 	if (ret == QLA_ERROR)
1644 		return NULL;
1645 
1646 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1647 				       cmds_max, sizeof(struct ddb_entry),
1648 				       sizeof(struct ql4_task_data),
1649 				       initial_cmdsn, ddb_index);
1650 	if (!cls_sess)
1651 		return NULL;
1652 
1653 	sess = cls_sess->dd_data;
1654 	ddb_entry = sess->dd_data;
1655 	ddb_entry->fw_ddb_index = ddb_index;
1656 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1657 	ddb_entry->ha = ha;
1658 	ddb_entry->sess = cls_sess;
1659 	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
1660 	ddb_entry->ddb_change = qla4xxx_ddb_change;
1661 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1662 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1663 	ha->tot_ddbs++;
1664 
1665 	return cls_sess;
1666 }
1667 
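/**
 * qla4xxx_session_destroy - destroy a session and free its DDB entry
 * @cls_sess: Pointer to the session to be destroyed
 *
 * Waits up to LOGOUT_TOV seconds for the firmware DDB to leave the active
 * state, clears the DDB entry in the firmware, frees the driver's ddb_entry
 * and tears down the iSCSI class session.
 **/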
1668 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1669 {
1670 	struct iscsi_session *sess;
1671 	struct ddb_entry *ddb_entry;
1672 	struct scsi_qla_host *ha;
1673 	unsigned long flags, wtime;
1674 	struct dev_db_entry *fw_ddb_entry = NULL;
1675 	dma_addr_t fw_ddb_entry_dma;
1676 	uint32_t ddb_state;
1677 	int ret;
1678 
1679 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1680 	sess = cls_sess->dd_data;
1681 	ddb_entry = sess->dd_data;
1682 	ha = ddb_entry->ha;
1683 
1684 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1685 					  &fw_ddb_entry_dma, GFP_KERNEL);
1686 	if (!fw_ddb_entry) {
1687 		ql4_printk(KERN_ERR, ha,
1688 			   "%s: Unable to allocate dma buffer\n", __func__);
1689 		goto destroy_session;
1690 	}
1691 
1692 	wtime = jiffies + (HZ * LOGOUT_TOV);
1693 	do {
1694 		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1695 					      fw_ddb_entry, fw_ddb_entry_dma,
1696 					      NULL, NULL, &ddb_state, NULL,
1697 					      NULL, NULL);
1698 		if (ret == QLA_ERROR)
1699 			goto destroy_session;
1700 
1701 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1702 		    (ddb_state == DDB_DS_SESSION_FAILED))
1703 			goto destroy_session;
1704 
1705 		schedule_timeout_uninterruptible(HZ);
1706 	} while ((time_after(wtime, jiffies)));
1707 
1708 destroy_session:
1709 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1710 
1711 	spin_lock_irqsave(&ha->hardware_lock, flags);
1712 	qla4xxx_free_ddb(ha, ddb_entry);
1713 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1714 
1715 	iscsi_session_teardown(cls_sess);
1716 
1717 	if (fw_ddb_entry)
1718 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1719 				  fw_ddb_entry, fw_ddb_entry_dma);
1720 }
1721 
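/**
 * qla4xxx_conn_create - create an iSCSI connection for a session
 * @cls_sess: Pointer to the parent session
 * @conn_idx: Connection index within the session
 *
 * Sets up the iSCSI class connection and records it in the session's
 * ddb_entry.
 *
 * Returns the new connection on success, NULL on failure.
 **/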
1722 static struct iscsi_cls_conn *
1723 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1724 {
1725 	struct iscsi_cls_conn *cls_conn;
1726 	struct iscsi_session *sess;
1727 	struct ddb_entry *ddb_entry;
1728 
1729 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1730 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1731 				    conn_idx);
1732 	if (!cls_conn)
1733 		return NULL;
1734 
1735 	sess = cls_sess->dd_data;
1736 	ddb_entry = sess->dd_data;
1737 	ddb_entry->conn = cls_conn;
1738 
1739 	return cls_conn;
1740 }
1741 
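/**
 * qla4xxx_conn_bind - bind a connection to its session and endpoint
 * @cls_session: Pointer to the session
 * @cls_conn: Pointer to the connection to bind
 * @transport_fd: Endpoint handle passed in from user space
 * @is_leading: Non-zero if this is the leading connection
 *
 * Binds the connection to the session and saves the endpoint's driver
 * data in the per-connection structure.
 *
 * Returns 0 on success, -EINVAL if the bind fails.
 **/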
1742 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1743 			     struct iscsi_cls_conn *cls_conn,
1744 			     uint64_t transport_fd, int is_leading)
1745 {
1746 	struct iscsi_conn *conn;
1747 	struct qla_conn *qla_conn;
1748 	struct iscsi_endpoint *ep;
1749 
1750 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1751 
1752 	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1753 		return -EINVAL;
1754 	ep = iscsi_lookup_endpoint(transport_fd);
1755 	conn = cls_conn->dd_data;
1756 	qla_conn = conn->dd_data;
1757 	qla_conn->qla_ep = ep->dd_data;
1758 	return 0;
1759 }
1760 
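/**
 * qla4xxx_conn_start - log in to the target for this connection
 * @cls_conn: Pointer to the connection to start
 *
 * Refuses to start if a matching session already exists in the firmware;
 * otherwise programs the DDB entry parameters and asks the firmware to
 * open the connection (log in to the target).
 *
 * Returns 0 on success, a non-zero error code otherwise.
 **/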
1761 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1762 {
1763 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1764 	struct iscsi_session *sess;
1765 	struct ddb_entry *ddb_entry;
1766 	struct scsi_qla_host *ha;
1767 	struct dev_db_entry *fw_ddb_entry = NULL;
1768 	dma_addr_t fw_ddb_entry_dma;
1769 	uint32_t mbx_sts = 0;
1770 	int ret = 0;
1771 	int status = QLA_SUCCESS;
1772 
1773 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1774 	sess = cls_sess->dd_data;
1775 	ddb_entry = sess->dd_data;
1776 	ha = ddb_entry->ha;
1777 
1778 	/* Check if we have a matching FW DDB; if so, do not log in to
1779 	 * this target, since doing so could cause the target to log out
1780 	 * the previous connection.
1781 	 */
1782 	ret = qla4xxx_match_fwdb_session(ha, cls_conn);
1783 	if (ret == QLA_SUCCESS) {
1784 		ql4_printk(KERN_INFO, ha,
1785 			   "Session already exists in FW.\n");
1786 		ret = -EEXIST;
1787 		goto exit_conn_start;
1788 	}
1789 
1790 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1791 					  &fw_ddb_entry_dma, GFP_KERNEL);
1792 	if (!fw_ddb_entry) {
1793 		ql4_printk(KERN_ERR, ha,
1794 			   "%s: Unable to allocate dma buffer\n", __func__);
1795 		ret = -ENOMEM;
1796 		goto exit_conn_start;
1797 	}
1798 
1799 	ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1800 	if (ret) {
1801 		/* If iscsid is stopped and restarted then there is no need
1802 		 * to set the params again, since the DDB state will already
1803 		 * be active and the FW does not allow set_ddb on an active
1804 		 * session.
1805 		 */
1806 		if (mbx_sts)
1807 			if (ddb_entry->fw_ddb_device_state ==
1808 						DDB_DS_SESSION_ACTIVE) {
1809 				ddb_entry->unblock_sess(ddb_entry->sess);
1810 				goto exit_set_param;
1811 			}
1812 
1813 		ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1814 			   __func__, ddb_entry->fw_ddb_index);
1815 		goto exit_conn_start;
1816 	}
1817 
1818 	status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1819 	if (status == QLA_ERROR) {
1820 		ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1821 			   sess->targetname);
1822 		ret = -EINVAL;
1823 		goto exit_conn_start;
1824 	}
1825 
1826 	if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1827 		ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1828 
1829 	DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1830 		      ddb_entry->fw_ddb_device_state));
1831 
1832 exit_set_param:
1833 	ret = 0;
1834 
1835 exit_conn_start:
1836 	if (fw_ddb_entry)
1837 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1838 				  fw_ddb_entry, fw_ddb_entry_dma);
1839 	return ret;
1840 }
1841 
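/**
 * qla4xxx_conn_destroy - log out and destroy a connection
 * @cls_conn: Pointer to the connection to destroy
 *
 * Requests a close-session logout from the firmware for the connection's
 * DDB entry.
 **/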
1842 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
1843 {
1844 	struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1845 	struct iscsi_session *sess;
1846 	struct scsi_qla_host *ha;
1847 	struct ddb_entry *ddb_entry;
1848 	int options;
1849 
1850 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1851 	sess = cls_sess->dd_data;
1852 	ddb_entry = sess->dd_data;
1853 	ha = ddb_entry->ha;
1854 
1855 	options = LOGOUT_OPTION_CLOSE_SESSION;
1856 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1857 		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1858 }
1859 
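/**
 * qla4xxx_task_work - complete a passthrough iSCSI task
 * @wdata: Pointer to the work_struct embedded in ql4_task_data
 *
 * Work handler that examines the passthrough status returned by the
 * firmware; on successful completion it restores the ITT in the response
 * header and hands the PDU to libiscsi via iscsi_complete_pdu().
 **/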
1860 static void qla4xxx_task_work(struct work_struct *wdata)
1861 {
1862 	struct ql4_task_data *task_data;
1863 	struct scsi_qla_host *ha;
1864 	struct passthru_status *sts;
1865 	struct iscsi_task *task;
1866 	struct iscsi_hdr *hdr;
1867 	uint8_t *data;
1868 	uint32_t data_len;
1869 	struct iscsi_conn *conn;
1870 	int hdr_len;
1871 	itt_t itt;
1872 
1873 	task_data = container_of(wdata, struct ql4_task_data, task_work);
1874 	ha = task_data->ha;
1875 	task = task_data->task;
1876 	sts = &task_data->sts;
1877 	hdr_len = sizeof(struct iscsi_hdr);
1878 
1879 	DEBUG3(printk(KERN_INFO "Status returned\n"));
1880 	DEBUG3(qla4xxx_dump_buffer(sts, 64));
1881 	DEBUG3(printk(KERN_INFO "Response buffer"));
1882 	DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1883 
1884 	conn = task->conn;
1885 
1886 	switch (sts->completionStatus) {
1887 	case PASSTHRU_STATUS_COMPLETE:
1888 		hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1889 		/* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1890 		itt = sts->handle;
1891 		hdr->itt = itt;
1892 		data = task_data->resp_buffer + hdr_len;
1893 		data_len = task_data->resp_len - hdr_len;
1894 		iscsi_complete_pdu(conn, hdr, data, data_len);
1895 		break;
1896 	default:
1897 		ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1898 			   sts->completionStatus);
1899 		break;
1900 	}
1901 	return;
1902 }
1903 
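/**
 * qla4xxx_alloc_pdu - allocate PDU buffers for a passthrough task
 * @task: Pointer to the iSCSI task
 * @opcode: iSCSI opcode
 *
 * Maps the task data for DMA and allocates coherent request and response
 * PDU buffers. SCSI commands are not supported through this path.
 *
 * Returns 0 on success, a negative errno on failure.
 **/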
1904 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
1905 {
1906 	struct ql4_task_data *task_data;
1907 	struct iscsi_session *sess;
1908 	struct ddb_entry *ddb_entry;
1909 	struct scsi_qla_host *ha;
1910 	int hdr_len;
1911 
1912 	sess = task->conn->session;
1913 	ddb_entry = sess->dd_data;
1914 	ha = ddb_entry->ha;
1915 	task_data = task->dd_data;
1916 	memset(task_data, 0, sizeof(struct ql4_task_data));
1917 
1918 	if (task->sc) {
1919 		ql4_printk(KERN_INFO, ha,
1920 			   "%s: SCSI Commands not implemented\n", __func__);
1921 		return -EINVAL;
1922 	}
1923 
1924 	hdr_len = sizeof(struct iscsi_hdr);
1925 	task_data->ha = ha;
1926 	task_data->task = task;
1927 
1928 	if (task->data_count) {
1929 		task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1930 						     task->data_count,
1931 						     PCI_DMA_TODEVICE);
1932 	}
1933 
1934 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1935 		      __func__, task->conn->max_recv_dlength, hdr_len));
1936 
1937 	task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
1938 	task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1939 						    task_data->resp_len,
1940 						    &task_data->resp_dma,
1941 						    GFP_ATOMIC);
1942 	if (!task_data->resp_buffer)
1943 		goto exit_alloc_pdu;
1944 
1945 	task_data->req_len = task->data_count + hdr_len;
1946 	task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1947 						   task_data->req_len,
1948 						   &task_data->req_dma,
1949 						   GFP_ATOMIC);
1950 	if (!task_data->req_buffer)
1951 		goto exit_alloc_pdu;
1952 
1953 	task->hdr = task_data->req_buffer;
1954 
1955 	INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1956 
1957 	return 0;
1958 
1959 exit_alloc_pdu:
1960 	if (task_data->resp_buffer)
1961 		dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1962 				  task_data->resp_buffer, task_data->resp_dma);
1963 
1964 	if (task_data->req_buffer)
1965 		dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1966 				  task_data->req_buffer, task_data->req_dma);
1967 	return -ENOMEM;
1968 }
1969 
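/**
 * qla4xxx_task_cleanup - release resources allocated by qla4xxx_alloc_pdu
 * @task: Pointer to the iSCSI task
 *
 * Unmaps the task data and frees the request and response PDU buffers.
 **/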
1970 static void qla4xxx_task_cleanup(struct iscsi_task *task)
1971 {
1972 	struct ql4_task_data *task_data;
1973 	struct iscsi_session *sess;
1974 	struct ddb_entry *ddb_entry;
1975 	struct scsi_qla_host *ha;
1976 	int hdr_len;
1977 
1978 	hdr_len = sizeof(struct iscsi_hdr);
1979 	sess = task->conn->session;
1980 	ddb_entry = sess->dd_data;
1981 	ha = ddb_entry->ha;
1982 	task_data = task->dd_data;
1983 
1984 	if (task->data_count) {
1985 		dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1986 				 task->data_count, PCI_DMA_TODEVICE);
1987 	}
1988 
1989 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hdr %d\n",
1990 		      __func__, task->conn->max_recv_dlength, hdr_len));
1991 
1992 	dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1993 			  task_data->resp_buffer, task_data->resp_dma);
1994 	dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1995 			  task_data->req_buffer, task_data->req_dma);
1996 	return;
1997 }
1998 
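/**
 * qla4xxx_task_xmit - transmit an iSCSI task
 * @task: Pointer to the iSCSI task
 *
 * Non-SCSI tasks are sent to the firmware as passthrough requests.
 * SCSI command transmission is not implemented through this path.
 **/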
1999 static int qla4xxx_task_xmit(struct iscsi_task *task)
2000 {
2001 	struct scsi_cmnd *sc = task->sc;
2002 	struct iscsi_session *sess = task->conn->session;
2003 	struct ddb_entry *ddb_entry = sess->dd_data;
2004 	struct scsi_qla_host *ha = ddb_entry->ha;
2005 
2006 	if (!sc)
2007 		return qla4xxx_send_passthru0(task);
2008 
2009 	ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
2010 		   __func__);
2011 	return -ENOSYS;
2012 }
2013 
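/**
 * qla4xxx_copy_from_fwddb_param - copy firmware DDB params to flash objects
 * @sess: Pointer to the flash session to fill in
 * @conn: Pointer to the flash connection to fill in
 * @fw_ddb_entry: Pointer to the firmware DDB entry to copy from
 *
 * Translates the option bits, iSCSI/TCP/IP parameters and address buffers
 * of a firmware DDB entry into the flash session and connection objects.
 *
 * Returns 0 on success, a negative error code on failure.
 **/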
2014 static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
2015 					 struct iscsi_bus_flash_conn *conn,
2016 					 struct dev_db_entry *fw_ddb_entry)
2017 {
2018 	unsigned long options = 0;
2019 	int rc = 0;
2020 
2021 	options = le16_to_cpu(fw_ddb_entry->options);
2022 	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
2023 	if (test_bit(OPT_IPV6_DEVICE, &options)) {
2024 		rc = iscsi_switch_str_param(&sess->portal_type,
2025 					    PORTAL_TYPE_IPV6);
2026 		if (rc)
2027 			goto exit_copy;
2028 	} else {
2029 		rc = iscsi_switch_str_param(&sess->portal_type,
2030 					    PORTAL_TYPE_IPV4);
2031 		if (rc)
2032 			goto exit_copy;
2033 	}
2034 
2035 	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
2036 					      &options);
2037 	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
2038 	sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
2039 
2040 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2041 	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
2042 	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
2043 	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
2044 	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
2045 	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
2046 					    &options);
2047 	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
2048 	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
2049 	conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
2050 	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
2051 					     &options);
2052 	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
2053 	sess->discovery_auth_optional =
2054 			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
2055 	if (test_bit(ISCSIOPT_ERL1, &options))
2056 		sess->erl |= BIT_1;
2057 	if (test_bit(ISCSIOPT_ERL0, &options))
2058 		sess->erl |= BIT_0;
2059 
2060 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
2061 	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
2062 	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
2063 	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
2064 	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
2065 		conn->tcp_timer_scale |= BIT_3;
2066 	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
2067 		conn->tcp_timer_scale |= BIT_2;
2068 	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
2069 		conn->tcp_timer_scale |= BIT_1;
2070 
2071 	conn->tcp_timer_scale >>= 1;
2072 	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
2073 
2074 	options = le16_to_cpu(fw_ddb_entry->ip_options);
2075 	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
2076 
2077 	conn->max_recv_dlength = BYTE_UNITS *
2078 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2079 	conn->max_xmit_dlength = BYTE_UNITS *
2080 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2081 	sess->first_burst = BYTE_UNITS *
2082 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2083 	sess->max_burst = BYTE_UNITS *
2084 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2085 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2086 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2087 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2088 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2089 	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
2090 	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
2091 	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
2092 	conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
2093 	conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
2094 	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
2095 	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
2096 	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
2097 	sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
2098 	sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
2099 	sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2100 	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
2101 
2102 	sess->default_taskmgmt_timeout =
2103 				le16_to_cpu(fw_ddb_entry->def_timeout);
2104 	conn->port = le16_to_cpu(fw_ddb_entry->port);
2105 
2106 	options = le16_to_cpu(fw_ddb_entry->options);
2107 	conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2108 	if (!conn->ipaddress) {
2109 		rc = -ENOMEM;
2110 		goto exit_copy;
2111 	}
2112 
2113 	conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2114 	if (!conn->redirect_ipaddr) {
2115 		rc = -ENOMEM;
2116 		goto exit_copy;
2117 	}
2118 
2119 	memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
2120 	memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
2121 
2122 	if (test_bit(OPT_IPV6_DEVICE, &options)) {
2123 		conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
2124 
2125 		conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
2126 		if (!conn->link_local_ipv6_addr) {
2127 			rc = -ENOMEM;
2128 			goto exit_copy;
2129 		}
2130 
2131 		memcpy(conn->link_local_ipv6_addr,
2132 		       fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
2133 	} else {
2134 		conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
2135 	}
2136 
2137 	if (fw_ddb_entry->iscsi_name[0]) {
2138 		rc = iscsi_switch_str_param(&sess->targetname,
2139 					    (char *)fw_ddb_entry->iscsi_name);
2140 		if (rc)
2141 			goto exit_copy;
2142 	}
2143 
2144 	if (fw_ddb_entry->iscsi_alias[0]) {
2145 		rc = iscsi_switch_str_param(&sess->targetalias,
2146 					    (char *)fw_ddb_entry->iscsi_alias);
2147 		if (rc)
2148 			goto exit_copy;
2149 	}
2150 
2151 	COPY_ISID(sess->isid, fw_ddb_entry->isid);
2152 
2153 exit_copy:
2154 	return rc;
2155 }
2156 
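/**
 * qla4xxx_copy_to_fwddb_param - copy flash object params to a firmware DDB
 * @sess: Pointer to the flash session to copy from
 * @conn: Pointer to the flash connection to copy from
 * @fw_ddb_entry: Pointer to the firmware DDB entry to fill in
 *
 * Inverse of qla4xxx_copy_from_fwddb_param(): packs the session and
 * connection parameters back into the firmware DDB entry format.
 **/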
2157 static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
2158 				       struct iscsi_bus_flash_conn *conn,
2159 				       struct dev_db_entry *fw_ddb_entry)
2160 {
2161 	uint16_t options;
2162 	int rc = 0;
2163 
2164 	options = le16_to_cpu(fw_ddb_entry->options);
2165 	SET_BITVAL(conn->is_fw_assigned_ipv6,  options, BIT_11);
2166 	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
2167 		options |= BIT_8;
2168 	else
2169 		options &= ~BIT_8;
2170 
2171 	SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
2172 	SET_BITVAL(sess->discovery_sess, options, BIT_4);
2173 	SET_BITVAL(sess->entry_state, options, BIT_3);
2174 	fw_ddb_entry->options = cpu_to_le16(options);
2175 
2176 	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
2177 	SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
2178 	SET_BITVAL(conn->datadgst_en, options, BIT_12);
2179 	SET_BITVAL(sess->imm_data_en, options, BIT_11);
2180 	SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
2181 	SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
2182 	SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
2183 	SET_BITVAL(sess->chap_auth_en, options, BIT_7);
2184 	SET_BITVAL(conn->snack_req_en, options, BIT_6);
2185 	SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
2186 	SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
2187 	SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
2188 	SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
2189 	SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
2190 	fw_ddb_entry->iscsi_options = cpu_to_le16(options);
2191 
2192 	options = le16_to_cpu(fw_ddb_entry->tcp_options);
2193 	SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
2194 	SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
2195 	SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
2196 	SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
2197 	SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
2198 	SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
2199 	SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
2200 	fw_ddb_entry->tcp_options = cpu_to_le16(options);
2201 
2202 	options = le16_to_cpu(fw_ddb_entry->ip_options);
2203 	SET_BITVAL(conn->fragment_disable, options, BIT_4);
2204 	fw_ddb_entry->ip_options = cpu_to_le16(options);
2205 
2206 	fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
2207 	fw_ddb_entry->iscsi_max_rcv_data_seg_len =
2208 			       cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
2209 	fw_ddb_entry->iscsi_max_snd_data_seg_len =
2210 			       cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
2211 	fw_ddb_entry->iscsi_first_burst_len =
2212 				cpu_to_le16(sess->first_burst / BYTE_UNITS);
2213 	fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
2214 					    BYTE_UNITS);
2215 	fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
2216 	fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
2217 	fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
2218 	fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
2219 	fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
2220 	fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
2221 	fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
2222 	fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
2223 	fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
2224 	fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
2225 	fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
2226 	fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
2227 	fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
2228 	fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
2229 	fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
2230 	fw_ddb_entry->port = cpu_to_le16(conn->port);
2231 	fw_ddb_entry->def_timeout =
2232 				cpu_to_le16(sess->default_taskmgmt_timeout);
2233 
2234 	if (conn->ipaddress)
2235 		memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
2236 		       sizeof(fw_ddb_entry->ip_addr));
2237 
2238 	if (conn->redirect_ipaddr)
2239 		memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
2240 		       sizeof(fw_ddb_entry->tgt_addr));
2241 
2242 	if (conn->link_local_ipv6_addr)
2243 		memcpy(fw_ddb_entry->link_local_ipv6_addr,
2244 		       conn->link_local_ipv6_addr,
2245 		       sizeof(fw_ddb_entry->link_local_ipv6_addr));
2246 
2247 	if (sess->targetname)
2248 		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
2249 		       sizeof(fw_ddb_entry->iscsi_name));
2250 
2251 	if (sess->targetalias)
2252 		memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
2253 		       sizeof(fw_ddb_entry->iscsi_alias));
2254 
2255 	COPY_ISID(fw_ddb_entry->isid, sess->isid);
2256 
2257 	return rc;
2258 }
2259 
2260 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
2261 				     struct dev_db_entry *fw_ddb_entry,
2262 				     struct iscsi_cls_session *cls_sess,
2263 				     struct iscsi_cls_conn *cls_conn)
2264 {
2265 	int buflen = 0;
2266 	struct iscsi_session *sess;
2267 	struct ddb_entry *ddb_entry;
2268 	struct iscsi_conn *conn;
2269 	char ip_addr[DDB_IPADDR_LEN];
2270 	uint16_t options = 0;
2271 
2272 	sess = cls_sess->dd_data;
2273 	ddb_entry = sess->dd_data;
2274 	conn = cls_conn->dd_data;
2275 
2276 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2277 
2278 	conn->max_recv_dlength = BYTE_UNITS *
2279 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2280 
2281 	conn->max_xmit_dlength = BYTE_UNITS *
2282 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2283 
2284 	sess->initial_r2t_en =
2285 			    (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2286 
2287 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2288 
2289 	sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2290 
2291 	sess->first_burst = BYTE_UNITS *
2292 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2293 
2294 	sess->max_burst = BYTE_UNITS *
2295 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2296 
2297 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2298 
2299 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2300 
2301 	conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
2302 
2303 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2304 
2305 	options = le16_to_cpu(fw_ddb_entry->options);
2306 	if (options & DDB_OPT_IPV6_DEVICE)
2307 		sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
2308 	else
2309 		sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
2310 
2311 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
2312 			(char *)fw_ddb_entry->iscsi_name, buflen);
2313 	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
2314 			(char *)ha->name_string, buflen);
2315 	iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
2316 			(char *)ip_addr, buflen);
2317 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2318 			(char *)fw_ddb_entry->iscsi_alias, buflen);
2319 }
2320 
2321 void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
2322 					     struct ddb_entry *ddb_entry)
2323 {
2324 	struct iscsi_cls_session *cls_sess;
2325 	struct iscsi_cls_conn *cls_conn;
2326 	uint32_t ddb_state;
2327 	dma_addr_t fw_ddb_entry_dma;
2328 	struct dev_db_entry *fw_ddb_entry;
2329 
2330 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2331 					  &fw_ddb_entry_dma, GFP_KERNEL);
2332 	if (!fw_ddb_entry) {
2333 		ql4_printk(KERN_ERR, ha,
2334 			   "%s: Unable to allocate dma buffer\n", __func__);
2335 		goto exit_session_conn_fwddb_param;
2336 	}
2337 
2338 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2339 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2340 				    NULL, NULL, NULL) == QLA_ERROR) {
2341 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2342 				  "get_ddb_entry for fw_ddb_index %d\n",
2343 				  ha->host_no, __func__,
2344 				  ddb_entry->fw_ddb_index));
2345 		goto exit_session_conn_fwddb_param;
2346 	}
2347 
2348 	cls_sess = ddb_entry->sess;
2349 
2350 	cls_conn = ddb_entry->conn;
2351 
2352 	/* Update params */
2353 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
2354 
2355 exit_session_conn_fwddb_param:
2356 	if (fw_ddb_entry)
2357 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2358 				  fw_ddb_entry, fw_ddb_entry_dma);
2359 }
2360 
2361 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
2362 				       struct ddb_entry *ddb_entry)
2363 {
2364 	struct iscsi_cls_session *cls_sess;
2365 	struct iscsi_cls_conn *cls_conn;
2366 	struct iscsi_session *sess;
2367 	struct iscsi_conn *conn;
2368 	uint32_t ddb_state;
2369 	dma_addr_t fw_ddb_entry_dma;
2370 	struct dev_db_entry *fw_ddb_entry;
2371 
2372 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2373 					  &fw_ddb_entry_dma, GFP_KERNEL);
2374 	if (!fw_ddb_entry) {
2375 		ql4_printk(KERN_ERR, ha,
2376 			   "%s: Unable to allocate dma buffer\n", __func__);
2377 		goto exit_session_conn_param;
2378 	}
2379 
2380 	if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
2381 				    fw_ddb_entry_dma, NULL, NULL, &ddb_state,
2382 				    NULL, NULL, NULL) == QLA_ERROR) {
2383 		DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
2384 				  "get_ddb_entry for fw_ddb_index %d\n",
2385 				  ha->host_no, __func__,
2386 				  ddb_entry->fw_ddb_index));
2387 		goto exit_session_conn_param;
2388 	}
2389 
2390 	cls_sess = ddb_entry->sess;
2391 	sess = cls_sess->dd_data;
2392 
2393 	cls_conn = ddb_entry->conn;
2394 	conn = cls_conn->dd_data;
2395 
2396 	/* Update timers after login */
2397 	ddb_entry->default_relogin_timeout =
2398 		(le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
2399 		 (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
2400 		 le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
2401 	ddb_entry->default_time2wait =
2402 				le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2403 
2404 	/* Update params */
2405 	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2406 	conn->max_recv_dlength = BYTE_UNITS *
2407 			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
2408 
2409 	conn->max_xmit_dlength = BYTE_UNITS *
2410 			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
2411 
2412 	sess->initial_r2t_en =
2413 			    (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2414 
2415 	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
2416 
2417 	sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
2418 
2419 	sess->first_burst = BYTE_UNITS *
2420 			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
2421 
2422 	sess->max_burst = BYTE_UNITS *
2423 				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
2424 
2425 	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
2426 
2427 	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
2428 
2429 	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
2430 
2431 	memcpy(sess->initiatorname, ha->name_string,
2432 	       min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
2433 
2434 	iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2435 			(char *)fw_ddb_entry->iscsi_alias, 0);
2436 
2437 exit_session_conn_param:
2438 	if (fw_ddb_entry)
2439 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2440 				  fw_ddb_entry, fw_ddb_entry_dma);
2441 }
2442 
2443 /*
2444  * Timer routines
2445  */
2446 
2447 static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
2448 				unsigned long interval)
2449 {
2450 	DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
2451 		     __func__, ha->host->host_no));
2452 	init_timer(&ha->timer);
2453 	ha->timer.expires = jiffies + interval * HZ;
2454 	ha->timer.data = (unsigned long)ha;
2455 	ha->timer.function = (void (*)(unsigned long))func;
2456 	add_timer(&ha->timer);
2457 	ha->timer_active = 1;
2458 }
2459 
2460 static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
2461 {
2462 	del_timer_sync(&ha->timer);
2463 	ha->timer_active = 0;
2464 }
2465 
2466 /**
2467  * qla4xxx_mark_device_missing - blocks the session
2468  * @cls_session: Pointer to the session to be blocked
2470  *
2471  * This routine marks a device missing by blocking the session.
2472  **/
2473 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
2474 {
2475 	iscsi_block_session(cls_session);
2476 }
2477 
2478 /**
2479  * qla4xxx_mark_all_devices_missing - mark all devices as missing.
2480  * @ha: Pointer to host adapter structure.
2481  *
2482  * This routine marks all devices missing by blocking their sessions.
2483  **/
2484 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
2485 {
2486 	iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
2487 }
2488 
2489 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
2490 				       struct ddb_entry *ddb_entry,
2491 				       struct scsi_cmnd *cmd)
2492 {
2493 	struct srb *srb;
2494 
2495 	srb = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
2496 	if (!srb)
2497 		return srb;
2498 
2499 	kref_init(&srb->srb_ref);
2500 	srb->ha = ha;
2501 	srb->ddb = ddb_entry;
2502 	srb->cmd = cmd;
2503 	srb->flags = 0;
2504 	CMD_SP(cmd) = (void *)srb;
2505 
2506 	return srb;
2507 }
2508 
2509 static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
2510 {
2511 	struct scsi_cmnd *cmd = srb->cmd;
2512 
2513 	if (srb->flags & SRB_DMA_VALID) {
2514 		scsi_dma_unmap(cmd);
2515 		srb->flags &= ~SRB_DMA_VALID;
2516 	}
2517 	CMD_SP(cmd) = NULL;
2518 }
2519 
2520 void qla4xxx_srb_compl(struct kref *ref)
2521 {
2522 	struct srb *srb = container_of(ref, struct srb, srb_ref);
2523 	struct scsi_cmnd *cmd = srb->cmd;
2524 	struct scsi_qla_host *ha = srb->ha;
2525 
2526 	qla4xxx_srb_free_dma(ha, srb);
2527 
2528 	mempool_free(srb, ha->srb_mempool);
2529 
2530 	cmd->scsi_done(cmd);
2531 }
2532 
2533 /**
2534  * qla4xxx_queuecommand - scsi layer issues scsi command to driver.
2535  * @host: scsi host
2536  * @cmd: Pointer to Linux's SCSI command structure
2537  *
2538  * Remarks:
2539  * This routine is invoked by Linux to send a SCSI command to the driver.
2540  * The mid-level driver tries to ensure that queuecommand never gets
2541  * invoked concurrently with itself or the interrupt handler (although
2542  * the interrupt handler may call this routine as part of request-
2543  * completion handling).   Unfortunely, it sometimes calls the scheduler
2544  * completion handling).  Unfortunately, it sometimes calls the scheduler
2545  **/
2546 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2547 {
2548 	struct scsi_qla_host *ha = to_qla_host(host);
2549 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
2550 	struct iscsi_cls_session *sess = ddb_entry->sess;
2551 	struct srb *srb;
2552 	int rval;
2553 
2554 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
2555 		if (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))
2556 			cmd->result = DID_NO_CONNECT << 16;
2557 		else
2558 			cmd->result = DID_REQUEUE << 16;
2559 		goto qc_fail_command;
2560 	}
2561 
2562 	if (!sess) {
2563 		cmd->result = DID_IMM_RETRY << 16;
2564 		goto qc_fail_command;
2565 	}
2566 
2567 	rval = iscsi_session_chkready(sess);
2568 	if (rval) {
2569 		cmd->result = rval;
2570 		goto qc_fail_command;
2571 	}
2572 
2573 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
2574 	    test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2575 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2576 	    test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
2577 	    test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
2578 	    !test_bit(AF_ONLINE, &ha->flags) ||
2579 	    !test_bit(AF_LINK_UP, &ha->flags) ||
2580 	    test_bit(AF_LOOPBACK, &ha->flags) ||
2581 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
2582 		goto qc_host_busy;
2583 
2584 	srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd);
2585 	if (!srb)
2586 		goto qc_host_busy;
2587 
2588 	rval = qla4xxx_send_command_to_isp(ha, srb);
2589 	if (rval != QLA_SUCCESS)
2590 		goto qc_host_busy_free_sp;
2591 
2592 	return 0;
2593 
2594 qc_host_busy_free_sp:
2595 	qla4xxx_srb_free_dma(ha, srb);
2596 	mempool_free(srb, ha->srb_mempool);
2597 
2598 qc_host_busy:
2599 	return SCSI_MLQUEUE_HOST_BUSY;
2600 
2601 qc_fail_command:
2602 	cmd->scsi_done(cmd);
2603 
2604 	return 0;
2605 }
2606 
2607 /**
2608  * qla4xxx_mem_free - frees memory allocated to adapter
2609  * @ha: Pointer to host adapter structure.
2610  *
2611  * Frees memory previously allocated by qla4xxx_mem_alloc
2612  **/
2613 static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2614 {
2615 	if (ha->queues)
2616 		dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2617 				  ha->queues_dma);
2618 
2619 	if (ha->fw_dump)
2620 		vfree(ha->fw_dump);
2621 
2622 	ha->queues_len = 0;
2623 	ha->queues = NULL;
2624 	ha->queues_dma = 0;
2625 	ha->request_ring = NULL;
2626 	ha->request_dma = 0;
2627 	ha->response_ring = NULL;
2628 	ha->response_dma = 0;
2629 	ha->shadow_regs = NULL;
2630 	ha->shadow_regs_dma = 0;
2631 	ha->fw_dump = NULL;
2632 	ha->fw_dump_size = 0;
2633 
2634 	/* Free srb pool. */
2635 	if (ha->srb_mempool)
2636 		mempool_destroy(ha->srb_mempool);
2637 
2638 	ha->srb_mempool = NULL;
2639 
2640 	if (ha->chap_dma_pool)
2641 		dma_pool_destroy(ha->chap_dma_pool);
2642 
2643 	if (ha->chap_list)
2644 		vfree(ha->chap_list);
2645 	ha->chap_list = NULL;
2646 
2647 	if (ha->fw_ddb_dma_pool)
2648 		dma_pool_destroy(ha->fw_ddb_dma_pool);
2649 
2650 	/* release io space registers  */
2651 	if (is_qla8022(ha)) {
2652 		if (ha->nx_pcibase)
2653 			iounmap(
2654 			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2655 	} else if (is_qla8032(ha)) {
2656 		if (ha->nx_pcibase)
2657 			iounmap(
2658 			    (struct device_reg_83xx __iomem *)ha->nx_pcibase);
2659 	} else if (ha->reg) {
2660 		iounmap(ha->reg);
2661 	}
2662 
2663 	if (ha->reset_tmplt.buff)
2664 		vfree(ha->reset_tmplt.buff);
2665 
2666 	pci_release_regions(ha->pdev);
2667 }
2668 
2669 /**
2670  * qla4xxx_mem_alloc - allocates memory for use by adapter.
2671  * @ha: Pointer to host adapter structure
2672  *
2673  * Allocates DMA memory for request and response queues. Also allocates memory
2674  * for srbs.
2675  **/
2676 static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
2677 {
2678 	unsigned long align;
2679 
2680 	/* Allocate contiguous block of DMA memory for queues. */
2681 	ha->queues_len = ((REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2682 			  (RESPONSE_QUEUE_DEPTH * QUEUE_SIZE) +
2683 			  sizeof(struct shadow_regs) +
2684 			  MEM_ALIGN_VALUE +
2685 			  (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
2686 	ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
2687 					&ha->queues_dma, GFP_KERNEL);
2688 	if (ha->queues == NULL) {
2689 		ql4_printk(KERN_WARNING, ha,
2690 		    "Memory Allocation failed - queues.\n");
2691 
2692 		goto mem_alloc_error_exit;
2693 	}
2694 	memset(ha->queues, 0, ha->queues_len);
2695 
2696 	/*
2697 	 * As per RISC alignment requirements -- the bus-address must be a
2698 	 * multiple of the request-ring size (in bytes).
2699 	 */
2700 	align = 0;
2701 	if ((unsigned long)ha->queues_dma & (MEM_ALIGN_VALUE - 1))
2702 		align = MEM_ALIGN_VALUE - ((unsigned long)ha->queues_dma &
2703 					   (MEM_ALIGN_VALUE - 1));
2704 
2705 	/* Update request and response queue pointers. */
2706 	ha->request_dma = ha->queues_dma + align;
2707 	ha->request_ring = (struct queue_entry *) (ha->queues + align);
2708 	ha->response_dma = ha->queues_dma + align +
2709 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE);
2710 	ha->response_ring = (struct queue_entry *) (ha->queues + align +
2711 						    (REQUEST_QUEUE_DEPTH *
2712 						     QUEUE_SIZE));
2713 	ha->shadow_regs_dma = ha->queues_dma + align +
2714 		(REQUEST_QUEUE_DEPTH * QUEUE_SIZE) +
2715 		(RESPONSE_QUEUE_DEPTH * QUEUE_SIZE);
2716 	ha->shadow_regs = (struct shadow_regs *) (ha->queues + align +
2717 						  (REQUEST_QUEUE_DEPTH *
2718 						   QUEUE_SIZE) +
2719 						  (RESPONSE_QUEUE_DEPTH *
2720 						   QUEUE_SIZE));
2721 
2722 	/* Allocate memory for srb pool. */
2723 	ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab,
2724 					 mempool_free_slab, srb_cachep);
2725 	if (ha->srb_mempool == NULL) {
2726 		ql4_printk(KERN_WARNING, ha,
2727 		    "Memory Allocation failed - SRB Pool.\n");
2728 
2729 		goto mem_alloc_error_exit;
2730 	}
2731 
2732 	ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
2733 					    CHAP_DMA_BLOCK_SIZE, 8, 0);
2734 
2735 	if (ha->chap_dma_pool == NULL) {
2736 		ql4_printk(KERN_WARNING, ha,
2737 		    "%s: chap_dma_pool allocation failed..\n", __func__);
2738 		goto mem_alloc_error_exit;
2739 	}
2740 
2741 	ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
2742 					      DDB_DMA_BLOCK_SIZE, 8, 0);
2743 
2744 	if (ha->fw_ddb_dma_pool == NULL) {
2745 		ql4_printk(KERN_WARNING, ha,
2746 			   "%s: fw_ddb_dma_pool allocation failed..\n",
2747 			   __func__);
2748 		goto mem_alloc_error_exit;
2749 	}
2750 
2751 	return QLA_SUCCESS;
2752 
2753 mem_alloc_error_exit:
2754 	qla4xxx_mem_free(ha);
2755 	return QLA_ERROR;
2756 }
2757 
2758 /**
2759  * qla4_8xxx_check_temp - Check the ISP82XX temperature.
2760  * @ha: adapter block pointer.
2761  *
2762  * Note: The caller should not hold the idc lock.
2763  **/
2764 static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2765 {
2766 	uint32_t temp, temp_state, temp_val;
2767 	int status = QLA_SUCCESS;
2768 
2769 	temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
2770 
2771 	temp_state = qla82xx_get_temp_state(temp);
2772 	temp_val = qla82xx_get_temp_val(temp);
2773 
2774 	if (temp_state == QLA82XX_TEMP_PANIC) {
2775 		ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
2776 			   " exceeds maximum allowed. Hardware has been shut"
2777 			   " down.\n", temp_val);
2778 		status = QLA_ERROR;
2779 	} else if (temp_state == QLA82XX_TEMP_WARN) {
2780 		if (ha->temperature == QLA82XX_TEMP_NORMAL)
2781 			ql4_printk(KERN_WARNING, ha, "Device temperature %d"
2782 				   " degrees C exceeds operating range."
2783 				   " Immediate action needed.\n", temp_val);
2784 	} else {
2785 		if (ha->temperature == QLA82XX_TEMP_WARN)
2786 			ql4_printk(KERN_INFO, ha, "Device temperature is"
2787 				   " now %d degrees C in normal range.\n",
2788 				   temp_val);
2789 	}
2790 	ha->temperature = temp_state;
2791 	return status;
2792 }
2793 
2794 /**
2795  * qla4_8xxx_check_fw_alive  - Check firmware health
2796  * @ha: Pointer to host adapter structure.
2797  *
2798  * Context: Interrupt
2799  **/
2800 static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2801 {
2802 	uint32_t fw_heartbeat_counter;
2803 	int status = QLA_SUCCESS;
2804 
2805 	fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
2806 						   QLA8XXX_PEG_ALIVE_COUNTER);
2807 	/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2808 	if (fw_heartbeat_counter == 0xffffffff) {
2809 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
2810 		    "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2811 		    ha->host_no, __func__));
2812 		return status;
2813 	}
2814 
2815 	if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
2816 		ha->seconds_since_last_heartbeat++;
2817 		/* FW not alive after 2 seconds */
2818 		if (ha->seconds_since_last_heartbeat == 2) {
2819 			ha->seconds_since_last_heartbeat = 0;
2820 			qla4_8xxx_dump_peg_reg(ha);
2821 			status = QLA_ERROR;
2822 		}
2823 	} else
2824 		ha->seconds_since_last_heartbeat = 0;
2825 
2826 	ha->fw_heartbeat_counter = fw_heartbeat_counter;
2827 	return status;
2828 }
2829 
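/**
 * qla4_8xxx_process_fw_error - handle a firmware error on ISP82xx/83xx
 * @ha: Pointer to host adapter structure.
 *
 * Reads the peg halt status and, depending on whether the error is
 * recoverable, sets either the DPC_RESET_HA or DPC_HA_UNRECOVERABLE flag,
 * completes any pending mailbox command prematurely and wakes the DPC.
 **/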
2830 static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
2831 {
2832 	uint32_t halt_status;
2833 	int halt_status_unrecoverable = 0;
2834 
2835 	halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
2836 
2837 	if (is_qla8022(ha)) {
2838 		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
2839 			   __func__);
2840 		qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2841 				CRB_NIU_XG_PAUSE_CTL_P0 |
2842 				CRB_NIU_XG_PAUSE_CTL_P1);
2843 
2844 		if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2845 			ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
2846 				   __func__);
2847 		if (halt_status & HALT_STATUS_UNRECOVERABLE)
2848 			halt_status_unrecoverable = 1;
2849 	} else if (is_qla8032(ha)) {
2850 		if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
2851 			ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
2852 				   __func__);
2853 		else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
2854 			halt_status_unrecoverable = 1;
2855 	}
2856 
2857 	/*
2858 	 * Since we cannot change dev_state in interrupt context,
2859 	 * set appropriate DPC flag then wakeup DPC
2860 	 */
2861 	if (halt_status_unrecoverable) {
2862 		set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2863 	} else {
2864 		ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
2865 			   __func__);
2866 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
2867 	}
2868 	qla4xxx_mailbox_premature_completion(ha);
2869 	qla4xxx_wake_dpc(ha);
2870 }
2871 
2872 /**
2873  * qla4_8xxx_watchdog - Poll dev state
2874  * @ha: Pointer to host adapter structure.
2875  *
2876  * Context: Interrupt
2877  **/
2878 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2879 {
2880 	uint32_t dev_state;
2881 	uint32_t idc_ctrl;
2882 
2883 	/* don't poll if reset is going on */
2884 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2885 	    test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2886 	    test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2887 		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2888 
2889 		if (qla4_8xxx_check_temp(ha)) {
2890 			if (is_qla8022(ha)) {
2891 				ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
2892 				qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2893 						CRB_NIU_XG_PAUSE_CTL_P0 |
2894 						CRB_NIU_XG_PAUSE_CTL_P1);
2895 			}
2896 			set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2897 			qla4xxx_wake_dpc(ha);
2898 		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2899 			   !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2900 
2901 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
2902 				   __func__);
2903 
2904 			if (is_qla8032(ha)) {
2905 				idc_ctrl = qla4_83xx_rd_reg(ha,
2906 							QLA83XX_IDC_DRV_CTRL);
2907 				if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
2908 					ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
2909 						   __func__);
2910 					qla4xxx_mailbox_premature_completion(
2911 									    ha);
2912 				}
2913 			}
2914 
2915 			if (is_qla8032(ha) ||
2916 			    (is_qla8022(ha) && !ql4xdontresethba)) {
2917 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
2918 				qla4xxx_wake_dpc(ha);
2919 			}
2920 		} else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
2921 		    !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2922 			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
2923 			    __func__);
2924 			set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags);
2925 			qla4xxx_wake_dpc(ha);
2926 		} else  {
2927 			/* Check firmware health */
2928 			if (qla4_8xxx_check_fw_alive(ha))
2929 				qla4_8xxx_process_fw_error(ha);
2930 		}
2931 	}
2932 }
2933 
2934 static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
2935 {
2936 	struct iscsi_session *sess;
2937 	struct ddb_entry *ddb_entry;
2938 	struct scsi_qla_host *ha;
2939 
2940 	sess = cls_sess->dd_data;
2941 	ddb_entry = sess->dd_data;
2942 	ha = ddb_entry->ha;
2943 
2944 	if (!(ddb_entry->ddb_type == FLASH_DDB))
2945 		return;
2946 
2947 	if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
2948 	    !iscsi_is_session_online(cls_sess)) {
2949 		if (atomic_read(&ddb_entry->retry_relogin_timer) !=
2950 		    INVALID_ENTRY) {
2951 			if (atomic_read(&ddb_entry->retry_relogin_timer) ==
2952 					0) {
2953 				atomic_set(&ddb_entry->retry_relogin_timer,
2954 					   INVALID_ENTRY);
2955 				set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2956 				set_bit(DF_RELOGIN, &ddb_entry->flags);
2957 				DEBUG2(ql4_printk(KERN_INFO, ha,
2958 				       "%s: index [%d] login device\n",
2959 					__func__, ddb_entry->fw_ddb_index));
2960 			} else
2961 				atomic_dec(&ddb_entry->retry_relogin_timer);
2962 		}
2963 	}
2964 
2965 	/* Wait for relogin to time out */
2966 	if (atomic_read(&ddb_entry->relogin_timer) &&
2967 	    (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
2968 		/*
2969 		 * If the relogin times out and the device is
2970 		 * still NOT ONLINE then try and relogin again.
2971 		 */
2972 		if (!iscsi_is_session_online(cls_sess)) {
2973 			/* Reset retry relogin timer */
2974 			atomic_inc(&ddb_entry->relogin_retry_count);
2975 			DEBUG2(ql4_printk(KERN_INFO, ha,
2976 				"%s: index[%d] relogin timed out-retrying"
2977 				" relogin (%d), retry (%d)\n", __func__,
2978 				ddb_entry->fw_ddb_index,
2979 				atomic_read(&ddb_entry->relogin_retry_count),
2980 				ddb_entry->default_time2wait + 4));
2981 			set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
2982 			atomic_set(&ddb_entry->retry_relogin_timer,
2983 				   ddb_entry->default_time2wait + 4);
2984 		}
2985 	}
2986 }
2987 
2988 /**
2989  * qla4xxx_timer - checks every second for work to do.
2990  * @ha: Pointer to host adapter structure.
2991  **/
2992 static void qla4xxx_timer(struct scsi_qla_host *ha)
2993 {
2994 	int start_dpc = 0;
2995 	uint16_t w;
2996 
2997 	iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
2998 
2999 	/* If we are in the middle of AER/EEH processing
3000 	 * skip any processing and reschedule the timer
3001 	 */
3002 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3003 		mod_timer(&ha->timer, jiffies + HZ);
3004 		return;
3005 	}
3006 
3007 	/* Hardware read to trigger an EEH error during mailbox waits. */
3008 	if (!pci_channel_offline(ha->pdev))
3009 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
3010 
3011 	if (is_qla80XX(ha))
3012 		qla4_8xxx_watchdog(ha);
3013 
3014 	if (is_qla40XX(ha)) {
3015 		/* Check for heartbeat interval. */
3016 		if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
3017 		    ha->heartbeat_interval != 0) {
3018 			ha->seconds_since_last_heartbeat++;
3019 			if (ha->seconds_since_last_heartbeat >
3020 			    ha->heartbeat_interval + 2)
3021 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
3022 		}
3023 	}
3024 
3025 	/* Process any deferred work. */
3026 	if (!list_empty(&ha->work_list))
3027 		start_dpc++;
3028 
3029 	/* Wakeup the dpc routine for this adapter, if needed. */
3030 	if (start_dpc ||
3031 	     test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3032 	     test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
3033 	     test_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags) ||
3034 	     test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3035 	     test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3036 	     test_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags) ||
3037 	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
3038 	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
3039 	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
3040 	     test_bit(DPC_AEN, &ha->dpc_flags)) {
3041 		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
3042 			      " - dpc flags = 0x%lx\n",
3043 			      ha->host_no, __func__, ha->dpc_flags));
3044 		qla4xxx_wake_dpc(ha);
3045 	}
3046 
3047 	/* Reschedule timer thread to call us back in one second */
3048 	mod_timer(&ha->timer, jiffies + HZ);
3049 
3050 	DEBUG2(ha->seconds_since_last_intr++);
3051 }
3052 
3053 /**
3054  * qla4xxx_cmd_wait - waits for all outstanding commands to complete
3055  * @ha: Pointer to host adapter structure.
3056  *
3057  * This routine stalls the driver until all outstanding commands are returned.
3058  * Caller must release the Hardware Lock prior to calling this routine.
3059  **/
3060 static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
3061 {
3062 	uint32_t index = 0;
3063 	unsigned long flags;
3064 	struct scsi_cmnd *cmd;
3065 
3066 	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ);
3067 
3068 	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to "
3069 	    "complete\n", WAIT_CMD_TOV));
3070 
3071 	while (!time_after_eq(jiffies, wtime)) {
3072 		spin_lock_irqsave(&ha->hardware_lock, flags);
3073 		/* Find a command that hasn't completed. */
3074 		for (index = 0; index < ha->host->can_queue; index++) {
3075 			cmd = scsi_host_find_tag(ha->host, index);
3076 			/*
3077 			 * We cannot just check if the index is valid,
3078 			 * because if we are run from the scsi eh, then
3079 			 * the scsi/block layer is going to prevent
3080 			 * the tag from being released.
3081 			 */
3082 			if (cmd != NULL && CMD_SP(cmd))
3083 				break;
3084 		}
3085 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3086 
3087 		/* If No Commands are pending, wait is complete */
3088 		if (index == ha->host->can_queue)
3089 			return QLA_SUCCESS;
3090 
3091 		msleep(1000);
3092 	}
3093 	/* If we timed out on waiting for commands to come back
3094 	 * return ERROR. */
3095 	return QLA_ERROR;
3096 }
3097 
3098 int qla4xxx_hw_reset(struct scsi_qla_host *ha)
3099 {
3100 	uint32_t ctrl_status;
3101 	unsigned long flags = 0;
3102 
3103 	DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
3104 
3105 	if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
3106 		return QLA_ERROR;
3107 
3108 	spin_lock_irqsave(&ha->hardware_lock, flags);
3109 
3110 	/*
3111 	 * If the SCSI Reset Interrupt bit is set, clear it.
3112 	 * Otherwise, the Soft Reset won't work.
3113 	 */
3114 	ctrl_status = readw(&ha->reg->ctrl_status);
3115 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0)
3116 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
3117 
3118 	/* Issue Soft Reset */
3119 	writel(set_rmask(CSR_SOFT_RESET), &ha->reg->ctrl_status);
3120 	readl(&ha->reg->ctrl_status);
3121 
3122 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3123 	return QLA_SUCCESS;
3124 }
3125 
3126 /**
3127  * qla4xxx_soft_reset - performs soft reset.
3128  * @ha: Pointer to host adapter structure.
3129  **/
3130 int qla4xxx_soft_reset(struct scsi_qla_host *ha)
3131 {
3132 	uint32_t max_wait_time;
3133 	unsigned long flags = 0;
3134 	int status;
3135 	uint32_t ctrl_status;
3136 
3137 	status = qla4xxx_hw_reset(ha);
3138 	if (status != QLA_SUCCESS)
3139 		return status;
3140 
3141 	status = QLA_ERROR;
3142 	/* Wait until the Network Reset Intr bit is cleared */
3143 	max_wait_time = RESET_INTR_TOV;
3144 	do {
3145 		spin_lock_irqsave(&ha->hardware_lock, flags);
3146 		ctrl_status = readw(&ha->reg->ctrl_status);
3147 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3148 
3149 		if ((ctrl_status & CSR_NET_RESET_INTR) == 0)
3150 			break;
3151 
3152 		msleep(1000);
3153 	} while ((--max_wait_time));
3154 
3155 	if ((ctrl_status & CSR_NET_RESET_INTR) != 0) {
3156 		DEBUG2(printk(KERN_WARNING
3157 			      "scsi%ld: Network Reset Intr not cleared by "
3158 			      "Network function, clearing it now!\n",
3159 			      ha->host_no));
3160 		spin_lock_irqsave(&ha->hardware_lock, flags);
3161 		writel(set_rmask(CSR_NET_RESET_INTR), &ha->reg->ctrl_status);
3162 		readl(&ha->reg->ctrl_status);
3163 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3164 	}
3165 
3166 	/* Wait until the firmware tells us the Soft Reset is done */
3167 	max_wait_time = SOFT_RESET_TOV;
3168 	do {
3169 		spin_lock_irqsave(&ha->hardware_lock, flags);
3170 		ctrl_status = readw(&ha->reg->ctrl_status);
3171 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3172 
3173 		if ((ctrl_status & CSR_SOFT_RESET) == 0) {
3174 			status = QLA_SUCCESS;
3175 			break;
3176 		}
3177 
3178 		msleep(1000);
3179 	} while ((--max_wait_time));
3180 
3181 	/*
3182 	 * Also, make sure that the SCSI Reset Interrupt bit has been cleared
3183 	 * after the soft reset has taken place.
3184 	 */
3185 	spin_lock_irqsave(&ha->hardware_lock, flags);
3186 	ctrl_status = readw(&ha->reg->ctrl_status);
3187 	if ((ctrl_status & CSR_SCSI_RESET_INTR) != 0) {
3188 		writel(set_rmask(CSR_SCSI_RESET_INTR), &ha->reg->ctrl_status);
3189 		readl(&ha->reg->ctrl_status);
3190 	}
3191 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3192 
3193 	/* If the soft reset fails, the BIOS on the other
3194 	 * function is most likely also enabled.
3195 	 * Since initialization is sequential, the other function
3196 	 * won't be able to acknowledge the soft reset.
3197 	 * Issue a force soft reset to work around this scenario.
3198 	 */
3199 	if (max_wait_time == 0) {
3200 		/* Issue Force Soft Reset */
3201 		spin_lock_irqsave(&ha->hardware_lock, flags);
3202 		writel(set_rmask(CSR_FORCE_SOFT_RESET), &ha->reg->ctrl_status);
3203 		readl(&ha->reg->ctrl_status);
3204 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
3205 		/* Wait until the firmware tells us the Soft Reset is done */
3206 		max_wait_time = SOFT_RESET_TOV;
3207 		do {
3208 			spin_lock_irqsave(&ha->hardware_lock, flags);
3209 			ctrl_status = readw(&ha->reg->ctrl_status);
3210 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
3211 
3212 			if ((ctrl_status & CSR_FORCE_SOFT_RESET) == 0) {
3213 				status = QLA_SUCCESS;
3214 				break;
3215 			}
3216 
3217 			msleep(1000);
3218 		} while ((--max_wait_time));
3219 	}
3220 
3221 	return status;
3222 }
3223 
3224 /**
3225  * qla4xxx_abort_active_cmds - returns all outstanding i/o requests to O.S.
3226  * @ha: Pointer to host adapter structure.
3227  * @res: returned scsi status
3228  *
3229  * This routine is called just prior to a HARD RESET to return all
3230  * outstanding commands back to the Operating System.
3231  * Caller should make sure that the following locks are released
3232  * before calling this routine: Hardware lock, and io_request_lock.
3233  **/
3234 static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha, int res)
3235 {
3236 	struct srb *srb;
3237 	int i;
3238 	unsigned long flags;
3239 
3240 	spin_lock_irqsave(&ha->hardware_lock, flags);
3241 	for (i = 0; i < ha->host->can_queue; i++) {
3242 		srb = qla4xxx_del_from_active_array(ha, i);
3243 		if (srb != NULL) {
3244 			srb->cmd->result = res;
3245 			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
3246 		}
3247 	}
3248 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3249 }
3250 
3251 void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
3252 {
3253 	clear_bit(AF_ONLINE, &ha->flags);
3254 
3255 	/* Disable the board */
3256 	ql4_printk(KERN_INFO, ha, "Disabling the board\n");
3257 
3258 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3259 	qla4xxx_mark_all_devices_missing(ha);
3260 	clear_bit(AF_INIT_DONE, &ha->flags);
3261 }
3262 
3263 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
3264 {
3265 	struct iscsi_session *sess;
3266 	struct ddb_entry *ddb_entry;
3267 
3268 	sess = cls_session->dd_data;
3269 	ddb_entry = sess->dd_data;
3270 	ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
3271 
3272 	if (ddb_entry->ddb_type == FLASH_DDB)
3273 		iscsi_block_session(ddb_entry->sess);
3274 	else
3275 		iscsi_session_failure(cls_session->dd_data,
3276 				      ISCSI_ERR_CONN_FAILED);
3277 }
3278 
3279 /**
3280  * qla4xxx_recover_adapter - recovers adapter after a fatal error
3281  * @ha: Pointer to host adapter structure.
3282  **/
3283 static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
3284 {
3285 	int status = QLA_ERROR;
3286 	uint8_t reset_chip = 0;
3287 	uint32_t dev_state;
3288 	unsigned long wait;
3289 
3290 	/* Stall incoming I/O until we are done */
3291 	scsi_block_requests(ha->host);
3292 	clear_bit(AF_ONLINE, &ha->flags);
3293 	clear_bit(AF_LINK_UP, &ha->flags);
3294 
3295 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
3296 
3297 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3298 
3299 	if (is_qla8032(ha) &&
3300 	    !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3301 		ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3302 			   __func__);
3303 		/* disable pause frame for ISP83xx */
3304 		qla4_83xx_disable_pause(ha);
3305 	}
3306 
3307 	iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
3308 
3309 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3310 		reset_chip = 1;
3311 
3312 	/* For the DPC_RESET_HA_INTR case (ISP-4xxx specific)
3313 	 * do not reset adapter, jump to initialize_adapter */
3314 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3315 		status = QLA_SUCCESS;
3316 		goto recover_ha_init_adapter;
3317 	}
3318 
3319 	/* For the ISP-8xxx adapter, issue a stop_firmware if invoked
3320 	 * from eh_host_reset or ioctl module */
3321 	if (is_qla80XX(ha) && !reset_chip &&
3322 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
3323 
3324 		DEBUG2(ql4_printk(KERN_INFO, ha,
3325 		    "scsi%ld: %s - Performing stop_firmware...\n",
3326 		    ha->host_no, __func__));
3327 		status = ha->isp_ops->reset_firmware(ha);
3328 		if (status == QLA_SUCCESS) {
3329 			if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3330 				qla4xxx_cmd_wait(ha);
3331 
3332 			ha->isp_ops->disable_intrs(ha);
3333 			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3334 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3335 		} else {
3336 			/* If the stop_firmware fails then
3337 			 * reset the entire chip */
3338 			reset_chip = 1;
3339 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3340 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
3341 		}
3342 	}
3343 
3344 	/* Issue full chip reset if recovering from a catastrophic error,
3345 	 * or if stop_firmware fails for ISP-8xxx.
3346 	 * This is the default case for ISP-4xxx */
3347 	if (is_qla40XX(ha) || reset_chip) {
3348 		if (is_qla40XX(ha))
3349 			goto chip_reset;
3350 
3351 		/* Check whether the 8XXX firmware is alive;
3352 		 * we may have arrived here from NEED_RESET
3353 		 * detection only */
3354 		if (test_bit(AF_FW_RECOVERY, &ha->flags))
3355 			goto chip_reset;
3356 
3357 		wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
3358 		while (time_before(jiffies, wait)) {
3359 			if (qla4_8xxx_check_fw_alive(ha)) {
3360 				qla4xxx_mailbox_premature_completion(ha);
3361 				break;
3362 			}
3363 
3364 			set_current_state(TASK_UNINTERRUPTIBLE);
3365 			schedule_timeout(HZ);
3366 		}
3367 chip_reset:
3368 		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3369 			qla4xxx_cmd_wait(ha);
3370 
3371 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3372 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3373 		DEBUG2(ql4_printk(KERN_INFO, ha,
3374 		    "scsi%ld: %s - Performing chip reset..\n",
3375 		    ha->host_no, __func__));
3376 		status = ha->isp_ops->reset_chip(ha);
3377 	}
3378 
3379 	/* Flush any pending ddb changed AENs */
3380 	qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3381 
3382 recover_ha_init_adapter:
3383 	/* Upon successful firmware/chip reset, re-initialize the adapter */
3384 	if (status == QLA_SUCCESS) {
3385 		/* For ISP-4xxx, force function 1 to always initialize
3386 		 * before function 3 to prevent both functions from
3387 		 * stepping on each other */
3388 		if (is_qla40XX(ha) && (ha->mac_index == 3))
3389 			ssleep(6);
3390 
3391 		/* NOTE: AF_ONLINE flag set upon successful completion of
3392 		 *       qla4xxx_initialize_adapter */
3393 		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
3394 	}
3395 
3396 	/* Retry failed adapter initialization, if necessary.
3397 	 * Do not retry initialize_adapter for RESET_HA_INTR (ISP-4xxx specific)
3398 	 * case to prevent ping-pong resets between functions */
3399 	if (!test_bit(AF_ONLINE, &ha->flags) &&
3400 	    !test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3401 		/* Adapter initialization failed, see if we can retry
3402 		 * resetting the ha.
3403 		 * Since we don't want to block the DPC for too long
3404 		 * with multiple resets in the same thread,
3405 		 * utilize DPC to retry */
3406 		if (is_qla80XX(ha)) {
3407 			ha->isp_ops->idc_lock(ha);
3408 			dev_state = qla4_8xxx_rd_direct(ha,
3409 							QLA8XXX_CRB_DEV_STATE);
3410 			ha->isp_ops->idc_unlock(ha);
3411 			if (dev_state == QLA8XXX_DEV_FAILED) {
3412 				ql4_printk(KERN_INFO, ha, "%s: don't retry "
3413 					   "recover adapter. H/W is in Failed "
3414 					   "state\n", __func__);
3415 				qla4xxx_dead_adapter_cleanup(ha);
3416 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3417 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3418 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
3419 						&ha->dpc_flags);
3420 				status = QLA_ERROR;
3421 
3422 				goto exit_recover;
3423 			}
3424 		}
3425 
3426 		if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
3427 			ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
3428 			DEBUG2(printk("scsi%ld: recover adapter - retrying "
3429 				      "(%d) more times\n", ha->host_no,
3430 				      ha->retry_reset_ha_cnt));
3431 			set_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3432 			status = QLA_ERROR;
3433 		} else {
3434 			if (ha->retry_reset_ha_cnt > 0) {
3435 				/* Schedule another Reset HA--DPC will retry */
3436 				ha->retry_reset_ha_cnt--;
3437 				DEBUG2(printk("scsi%ld: recover adapter - "
3438 					      "retry remaining %d\n",
3439 					      ha->host_no,
3440 					      ha->retry_reset_ha_cnt));
3441 				status = QLA_ERROR;
3442 			}
3443 
3444 			if (ha->retry_reset_ha_cnt == 0) {
3445 				/* Recover adapter retries have been exhausted.
3446 				 * Adapter DEAD */
3447 				DEBUG2(printk("scsi%ld: recover adapter "
3448 					      "failed - board disabled\n",
3449 					      ha->host_no));
3450 				qla4xxx_dead_adapter_cleanup(ha);
3451 				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3452 				clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3453 				clear_bit(DPC_RESET_HA_FW_CONTEXT,
3454 					  &ha->dpc_flags);
3455 				status = QLA_ERROR;
3456 			}
3457 		}
3458 	} else {
3459 		clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3460 		clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3461 		clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
3462 	}
3463 
3464 exit_recover:
3465 	ha->adapter_error_count++;
3466 
3467 	if (test_bit(AF_ONLINE, &ha->flags))
3468 		ha->isp_ops->enable_intrs(ha);
3469 
3470 	scsi_unblock_requests(ha->host);
3471 
3472 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
3473 	DEBUG2(printk("scsi%ld: recover adapter: %s\n", ha->host_no,
3474 	    status == QLA_ERROR ? "FAILED" : "SUCCEEDED"));
3475 
3476 	return status;
3477 }
3478 
3479 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
3480 {
3481 	struct iscsi_session *sess;
3482 	struct ddb_entry *ddb_entry;
3483 	struct scsi_qla_host *ha;
3484 
3485 	sess = cls_session->dd_data;
3486 	ddb_entry = sess->dd_data;
3487 	ha = ddb_entry->ha;
3488 	if (!iscsi_is_session_online(cls_session)) {
3489 		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
3490 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3491 				   " unblock session\n", ha->host_no, __func__,
3492 				   ddb_entry->fw_ddb_index);
3493 			iscsi_unblock_session(ddb_entry->sess);
3494 		} else {
3495 			/* Trigger relogin */
3496 			if (ddb_entry->ddb_type == FLASH_DDB) {
3497 				if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
3498 					qla4xxx_arm_relogin_timer(ddb_entry);
3499 			} else
3500 				iscsi_session_failure(cls_session->dd_data,
3501 						      ISCSI_ERR_CONN_FAILED);
3502 		}
3503 	}
3504 }
3505 
3506 int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
3507 {
3508 	struct iscsi_session *sess;
3509 	struct ddb_entry *ddb_entry;
3510 	struct scsi_qla_host *ha;
3511 
3512 	sess = cls_session->dd_data;
3513 	ddb_entry = sess->dd_data;
3514 	ha = ddb_entry->ha;
3515 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3516 		   " unblock session\n", ha->host_no, __func__,
3517 		   ddb_entry->fw_ddb_index);
3518 
3519 	iscsi_unblock_session(ddb_entry->sess);
3520 
3521 	/* Start scan target */
3522 	if (test_bit(AF_ONLINE, &ha->flags)) {
3523 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3524 			   " start scan\n", ha->host_no, __func__,
3525 			   ddb_entry->fw_ddb_index);
3526 		scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
3527 	}
3528 	return QLA_SUCCESS;
3529 }
3530 
3531 int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3532 {
3533 	struct iscsi_session *sess;
3534 	struct ddb_entry *ddb_entry;
3535 	struct scsi_qla_host *ha;
3536 	int status = QLA_SUCCESS;
3537 
3538 	sess = cls_session->dd_data;
3539 	ddb_entry = sess->dd_data;
3540 	ha = ddb_entry->ha;
3541 	ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3542 		   " unblock user space session\n", ha->host_no, __func__,
3543 		   ddb_entry->fw_ddb_index);
3544 
3545 	if (!iscsi_is_session_online(cls_session)) {
3546 		iscsi_conn_start(ddb_entry->conn);
3547 		iscsi_conn_login_event(ddb_entry->conn,
3548 				       ISCSI_CONN_STATE_LOGGED_IN);
3549 	} else {
3550 		ql4_printk(KERN_INFO, ha,
3551 			   "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3552 			   ha->host_no, __func__, ddb_entry->fw_ddb_index,
3553 			   cls_session->sid);
3554 		status = QLA_ERROR;
3555 	}
3556 
3557 	return status;
3558 }
3559 
3560 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
3561 {
3562 	iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
3563 }
3564 
3565 static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
3566 {
3567 	uint16_t relogin_timer;
3568 	struct iscsi_session *sess;
3569 	struct ddb_entry *ddb_entry;
3570 	struct scsi_qla_host *ha;
3571 
3572 	sess = cls_sess->dd_data;
3573 	ddb_entry = sess->dd_data;
3574 	ha = ddb_entry->ha;
3575 
3576 	relogin_timer = max(ddb_entry->default_relogin_timeout,
3577 			    (uint16_t)RELOGIN_TOV);
3578 	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
3579 
3580 	DEBUG2(ql4_printk(KERN_INFO, ha,
3581 			  "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
3582 			  ddb_entry->fw_ddb_index, relogin_timer));
3583 
3584 	qla4xxx_login_flash_ddb(cls_sess);
3585 }
3586 
3587 static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
3588 {
3589 	struct iscsi_session *sess;
3590 	struct ddb_entry *ddb_entry;
3591 	struct scsi_qla_host *ha;
3592 
3593 	sess = cls_sess->dd_data;
3594 	ddb_entry = sess->dd_data;
3595 	ha = ddb_entry->ha;
3596 
3597 	if (!(ddb_entry->ddb_type == FLASH_DDB))
3598 		return;
3599 
3600 	if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
3601 	    !iscsi_is_session_online(cls_sess)) {
3602 		DEBUG2(ql4_printk(KERN_INFO, ha,
3603 				  "relogin issued\n"));
3604 		qla4xxx_relogin_flash_ddb(cls_sess);
3605 	}
3606 }
3607 
3608 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
3609 {
3610 	if (ha->dpc_thread)
3611 		queue_work(ha->dpc_thread, &ha->dpc_work);
3612 }
3613 
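/*
 * Deferred work events: a producer (often in interrupt or DPC context)
 * allocates a qla4_work_evt with qla4xxx_alloc_work() (GFP_ATOMIC),
 * fills in the type-specific payload, and queues it with
 * qla4xxx_post_work(), which wakes the DPC.  qla4xxx_do_work() later
 * drains ha->work_list and forwards each event to the iSCSI transport.
 * A minimal sketch of the producer side, mirroring
 * qla4xxx_post_aen_work() below:
 *
 *	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
 *	if (!e)
 *		return QLA_ERROR;
 *	e->u.aen.code = aen_code;
 *	e->u.aen.data_size = data_size;
 *	memcpy(e->u.aen.data, data, data_size);
 *	qla4xxx_post_work(ha, e);
 */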
3614 static struct qla4_work_evt *
3615 qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3616 		   enum qla4_work_type type)
3617 {
3618 	struct qla4_work_evt *e;
3619 	uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3620 
3621 	e = kzalloc(size, GFP_ATOMIC);
3622 	if (!e)
3623 		return NULL;
3624 
3625 	INIT_LIST_HEAD(&e->list);
3626 	e->type = type;
3627 	return e;
3628 }
3629 
3630 static void qla4xxx_post_work(struct scsi_qla_host *ha,
3631 			     struct qla4_work_evt *e)
3632 {
3633 	unsigned long flags;
3634 
3635 	spin_lock_irqsave(&ha->work_lock, flags);
3636 	list_add_tail(&e->list, &ha->work_list);
3637 	spin_unlock_irqrestore(&ha->work_lock, flags);
3638 	qla4xxx_wake_dpc(ha);
3639 }
3640 
3641 int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3642 			  enum iscsi_host_event_code aen_code,
3643 			  uint32_t data_size, uint8_t *data)
3644 {
3645 	struct qla4_work_evt *e;
3646 
3647 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3648 	if (!e)
3649 		return QLA_ERROR;
3650 
3651 	e->u.aen.code = aen_code;
3652 	e->u.aen.data_size = data_size;
3653 	memcpy(e->u.aen.data, data, data_size);
3654 
3655 	qla4xxx_post_work(ha, e);
3656 
3657 	return QLA_SUCCESS;
3658 }
3659 
3660 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3661 			       uint32_t status, uint32_t pid,
3662 			       uint32_t data_size, uint8_t *data)
3663 {
3664 	struct qla4_work_evt *e;
3665 
3666 	e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3667 	if (!e)
3668 		return QLA_ERROR;
3669 
3670 	e->u.ping.status = status;
3671 	e->u.ping.pid = pid;
3672 	e->u.ping.data_size = data_size;
3673 	memcpy(e->u.ping.data, data, data_size);
3674 
3675 	qla4xxx_post_work(ha, e);
3676 
3677 	return QLA_SUCCESS;
3678 }
3679 
3680 static void qla4xxx_do_work(struct scsi_qla_host *ha)
3681 {
3682 	struct qla4_work_evt *e, *tmp;
3683 	unsigned long flags;
3684 	LIST_HEAD(work);
3685 
3686 	spin_lock_irqsave(&ha->work_lock, flags);
3687 	list_splice_init(&ha->work_list, &work);
3688 	spin_unlock_irqrestore(&ha->work_lock, flags);
3689 
3690 	list_for_each_entry_safe(e, tmp, &work, list) {
3691 		list_del_init(&e->list);
3692 
3693 		switch (e->type) {
3694 		case QLA4_EVENT_AEN:
3695 			iscsi_post_host_event(ha->host_no,
3696 					      &qla4xxx_iscsi_transport,
3697 					      e->u.aen.code,
3698 					      e->u.aen.data_size,
3699 					      e->u.aen.data);
3700 			break;
3701 		case QLA4_EVENT_PING_STATUS:
3702 			iscsi_ping_comp_event(ha->host_no,
3703 					      &qla4xxx_iscsi_transport,
3704 					      e->u.ping.status,
3705 					      e->u.ping.pid,
3706 					      e->u.ping.data_size,
3707 					      e->u.ping.data);
3708 			break;
3709 		default:
3710 			ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3711 				   "supported", e->type);
3712 		}
3713 		kfree(e);
3714 	}
3715 }
3716 
3717 /**
3718  * qla4xxx_do_dpc - dpc routine
3719  * @work: pointer to the dpc work_struct within the adapter structure
3720  *
3721  * This routine is a task that is scheduled by the interrupt handler
3722  * to perform the background processing for interrupts.  We put it
3723  * on a work queue that is consumed whenever the scheduler runs; that
3724  * way we can do anything (e.g. put the process to sleep).  In fact,
3725  * the mid-level tries to sleep when it reaches the driver threshold
3726  * "host->can_queue". This can cause a panic if we were in our interrupt code.
3727  **/
3728 static void qla4xxx_do_dpc(struct work_struct *work)
3729 {
3730 	struct scsi_qla_host *ha =
3731 		container_of(work, struct scsi_qla_host, dpc_work);
3732 	int status = QLA_ERROR;
3733 
3734 	DEBUG2(printk("scsi%ld: %s: DPC handler waking up. "
3735 	    "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
3736 	    ha->host_no, __func__, ha->flags, ha->dpc_flags))
3737 
3738 	/* Initialization not yet finished. Don't do anything yet. */
3739 	if (!test_bit(AF_INIT_DONE, &ha->flags))
3740 		return;
3741 
3742 	if (test_bit(AF_EEH_BUSY, &ha->flags)) {
3743 		DEBUG2(printk(KERN_INFO "scsi%ld: %s: flags = %lx\n",
3744 		    ha->host_no, __func__, ha->flags));
3745 		return;
3746 	}
3747 
3748 	/* post events to application */
3749 	qla4xxx_do_work(ha);
3750 
3751 	if (is_qla80XX(ha)) {
3752 		if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3753 			if (is_qla8032(ha)) {
3754 				ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3755 					   __func__);
3756 				/* disable pause frame for ISP83xx */
3757 				qla4_83xx_disable_pause(ha);
3758 			}
3759 
3760 			ha->isp_ops->idc_lock(ha);
3761 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
3762 					    QLA8XXX_DEV_FAILED);
3763 			ha->isp_ops->idc_unlock(ha);
3764 			ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3765 			qla4_8xxx_device_state_handler(ha);
3766 		}
3767 
3768 		if (test_and_clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags))
3769 			qla4_83xx_post_idc_ack(ha);
3770 
3771 		if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3772 			qla4_8xxx_need_qsnt_handler(ha);
3773 		}
3774 	}
3775 
3776 	if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) &&
3777 	    (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3778 	    test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3779 	    test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3780 		if ((is_qla8022(ha) && ql4xdontresethba) ||
3781 		    (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
3782 			DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3783 			    ha->host_no, __func__));
3784 			clear_bit(DPC_RESET_HA, &ha->dpc_flags);
3785 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3786 			clear_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
3787 			goto dpc_post_reset_ha;
3788 		}
3789 		if (test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
3790 		    test_bit(DPC_RESET_HA, &ha->dpc_flags))
3791 			qla4xxx_recover_adapter(ha);
3792 
3793 		if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
3794 			uint8_t wait_time = RESET_INTR_TOV;
3795 
3796 			while ((readw(&ha->reg->ctrl_status) &
3797 				(CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
3798 				if (--wait_time == 0)
3799 					break;
3800 				msleep(1000);
3801 			}
3802 			if (wait_time == 0)
3803 				DEBUG2(printk("scsi%ld: %s: SR|FSR "
3804 					      "bit not cleared-- resetting\n",
3805 					      ha->host_no, __func__));
3806 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3807 			if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
3808 				qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3809 				status = qla4xxx_recover_adapter(ha);
3810 			}
3811 			clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
3812 			if (status == QLA_SUCCESS)
3813 				ha->isp_ops->enable_intrs(ha);
3814 		}
3815 	}
3816 
3817 dpc_post_reset_ha:
3818 	/* ---- process AEN? --- */
3819 	if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
3820 		qla4xxx_process_aen(ha, PROCESS_ALL_AENS);
3821 
3822 	/* ---- Get DHCP IP Address? --- */
3823 	if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
3824 		qla4xxx_get_dhcp_ip_address(ha);
3825 
3826 	/* ---- relogin device? --- */
3827 	if (adapter_up(ha) &&
3828 	    test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
3829 		iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
3830 	}
3831 
3832 	/* ---- link change? --- */
3833 	if (!test_bit(AF_LOOPBACK, &ha->flags) &&
3834 	    test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
3835 		if (!test_bit(AF_LINK_UP, &ha->flags)) {
3836 			/* ---- link down? --- */
3837 			qla4xxx_mark_all_devices_missing(ha);
3838 		} else {
3839 			/* ---- link up? --- *
3840 			 * F/W will auto login to all devices ONLY ONCE after
3841 			 * link up during driver initialization and runtime
3842 			 * fatal error recovery.  Therefore, the driver must
3843 			 * manually relogin to devices when recovering from
3844 			 * connection failures, logouts, expired KATO, etc. */
3845 			if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
3846 				qla4xxx_build_ddb_list(ha, ha->is_reset);
3847 				iscsi_host_for_each_session(ha->host,
3848 						qla4xxx_login_flash_ddb);
3849 			} else
3850 				qla4xxx_relogin_all_devices(ha);
3851 		}
3852 	}
3853 }
3854 
3855 /**
3856  * qla4xxx_free_adapter - release the adapter
3857  * @ha: pointer to adapter structure
3858  **/
3859 static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3860 {
3861 	qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
3862 
3863 	/* Turn-off interrupts on the card. */
3864 	ha->isp_ops->disable_intrs(ha);
3865 
3866 	if (is_qla40XX(ha)) {
3867 		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
3868 		       &ha->reg->ctrl_status);
3869 		readl(&ha->reg->ctrl_status);
3870 	} else if (is_qla8022(ha)) {
3871 		writel(0, &ha->qla4_82xx_reg->host_int);
3872 		readl(&ha->qla4_82xx_reg->host_int);
3873 	} else if (is_qla8032(ha)) {
3874 		writel(0, &ha->qla4_83xx_reg->risc_intr);
3875 		readl(&ha->qla4_83xx_reg->risc_intr);
3876 	}
3877 
3878 	/* Remove timer thread, if present */
3879 	if (ha->timer_active)
3880 		qla4xxx_stop_timer(ha);
3881 
3882 	/* Destroy the DPC workqueue for this host */
3883 	if (ha->dpc_thread)
3884 		destroy_workqueue(ha->dpc_thread);
3885 
3886 	/* Destroy the iSCSI task workqueue for this host */
3887 	if (ha->task_wq)
3888 		destroy_workqueue(ha->task_wq);
3889 
3890 	/* Put firmware in known state */
3891 	ha->isp_ops->reset_firmware(ha);
3892 
3893 	if (is_qla80XX(ha)) {
3894 		ha->isp_ops->idc_lock(ha);
3895 		qla4_8xxx_clear_drv_active(ha);
3896 		ha->isp_ops->idc_unlock(ha);
3897 	}
3898 
3899 	/* Detach interrupts */
3900 	qla4xxx_free_irqs(ha);
3901 
3902 	/* free extra memory */
3903 	qla4xxx_mem_free(ha);
3904 }
3905 
3906 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3907 {
3908 	int status = 0;
3909 	unsigned long mem_base, mem_len, db_base, db_len;
3910 	struct pci_dev *pdev = ha->pdev;
3911 
3912 	status = pci_request_regions(pdev, DRIVER_NAME);
3913 	if (status) {
3914 		printk(KERN_WARNING
3915 		    "scsi(%ld) Failed to reserve PIO regions (%s) "
3916 		    "status=%d\n", ha->host_no, pci_name(pdev), status);
3917 		goto iospace_error_exit;
3918 	}
3919 
3920 	DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
3921 	    __func__, pdev->revision));
3922 	ha->revision_id = pdev->revision;
3923 
3924 	/* remap phys address */
3925 	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
3926 	mem_len = pci_resource_len(pdev, 0);
3927 	DEBUG2(printk(KERN_INFO "%s: ioremap from %lx a size of %lx\n",
3928 	    __func__, mem_base, mem_len));
3929 
3930 	/* mapping of pcibase pointer */
3931 	ha->nx_pcibase = (unsigned long)ioremap(mem_base, mem_len);
3932 	if (!ha->nx_pcibase) {
3933 		printk(KERN_ERR
3934 		    "cannot remap MMIO (%s), aborting\n", pci_name(pdev));
3935 		pci_release_regions(ha->pdev);
3936 		goto iospace_error_exit;
3937 	}
3938 
3939 	/* Mapping of IO base pointer, door bell read and write pointer */
3940 
3941 	/* mapping of IO base pointer */
3942 	if (is_qla8022(ha)) {
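		/* ISP82xx: each PCI function appears to get its own 2 KB
		 * register window inside BAR 0 (offset 0xbc000 plus
		 * devfn << 11), hence the per-devfn offset below.
		 */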
3943 		ha->qla4_82xx_reg = (struct device_reg_82xx  __iomem *)
3944 				    ((uint8_t *)ha->nx_pcibase + 0xbc000 +
3945 				     (ha->pdev->devfn << 11));
3946 		ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3947 				    QLA82XX_CAM_RAM_DB2);
3948 	} else if (is_qla8032(ha)) {
3949 		ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
3950 				    ((uint8_t *)ha->nx_pcibase);
3951 	}
3952 
3953 	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
3954 	db_len = pci_resource_len(pdev, 4);
3955 
3956 	return 0;
3957 iospace_error_exit:
3958 	return -ENOMEM;
3959 }
3960 
3961 /**
3962  * qla4xxx_iospace_config - maps registers
3963  * @ha: pointer to adapter structure
3964  *
3965  * This routine maps the HBA's registers from the PCI address space
3966  * into the kernel virtual address space for memory mapped I/O.
3967  **/
3968 int qla4xxx_iospace_config(struct scsi_qla_host *ha)
3969 {
3970 	unsigned long pio, pio_len, pio_flags;
3971 	unsigned long mmio, mmio_len, mmio_flags;
3972 
3973 	pio = pci_resource_start(ha->pdev, 0);
3974 	pio_len = pci_resource_len(ha->pdev, 0);
3975 	pio_flags = pci_resource_flags(ha->pdev, 0);
3976 	if (pio_flags & IORESOURCE_IO) {
3977 		if (pio_len < MIN_IOBASE_LEN) {
3978 			ql4_printk(KERN_WARNING, ha,
3979 				"Invalid PCI I/O region size\n");
3980 			pio = 0;
3981 		}
3982 	} else {
3983 		ql4_printk(KERN_WARNING, ha, "region #0 not a PIO resource\n");
3984 		pio = 0;
3985 	}
3986 
3987 	/* Use MMIO operations for all accesses. */
3988 	mmio = pci_resource_start(ha->pdev, 1);
3989 	mmio_len = pci_resource_len(ha->pdev, 1);
3990 	mmio_flags = pci_resource_flags(ha->pdev, 1);
3991 
3992 	if (!(mmio_flags & IORESOURCE_MEM)) {
3993 		ql4_printk(KERN_ERR, ha,
3994 		    "region #0 not an MMIO resource, aborting\n");
3995 
3996 		goto iospace_error_exit;
3997 	}
3998 
3999 	if (mmio_len < MIN_IOBASE_LEN) {
4000 		ql4_printk(KERN_ERR, ha,
4001 		    "Invalid PCI mem region size, aborting\n");
4002 		goto iospace_error_exit;
4003 	}
4004 
4005 	if (pci_request_regions(ha->pdev, DRIVER_NAME)) {
4006 		ql4_printk(KERN_WARNING, ha,
4007 		    "Failed to reserve PIO/MMIO regions\n");
4008 
4009 		goto iospace_error_exit;
4010 	}
4011 
4012 	ha->pio_address = pio;
4013 	ha->pio_length = pio_len;
4014 	ha->reg = ioremap(mmio, MIN_IOBASE_LEN);
4015 	if (!ha->reg) {
4016 		ql4_printk(KERN_ERR, ha,
4017 		    "cannot remap MMIO, aborting\n");
4018 
4019 		goto iospace_error_exit;
4020 	}
4021 
4022 	return 0;
4023 
4024 iospace_error_exit:
4025 	return -ENOMEM;
4026 }
4027 
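/*
 * Per-ISP-family operation tables.  One of these is selected during
 * adapter setup and stored in ha->isp_ops, so the common code paths
 * above can dispatch chip-specific behaviour indirectly, e.g.
 * (sketch of calls already used in this file):
 *
 *	status = ha->isp_ops->reset_chip(ha);
 *	ha->isp_ops->disable_intrs(ha);
 */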
4028 static struct isp_operations qla4xxx_isp_ops = {
4029 	.iospace_config         = qla4xxx_iospace_config,
4030 	.pci_config             = qla4xxx_pci_config,
4031 	.disable_intrs          = qla4xxx_disable_intrs,
4032 	.enable_intrs           = qla4xxx_enable_intrs,
4033 	.start_firmware         = qla4xxx_start_firmware,
4034 	.intr_handler           = qla4xxx_intr_handler,
4035 	.interrupt_service_routine = qla4xxx_interrupt_service_routine,
4036 	.reset_chip             = qla4xxx_soft_reset,
4037 	.reset_firmware         = qla4xxx_hw_reset,
4038 	.queue_iocb             = qla4xxx_queue_iocb,
4039 	.complete_iocb          = qla4xxx_complete_iocb,
4040 	.rd_shdw_req_q_out      = qla4xxx_rd_shdw_req_q_out,
4041 	.rd_shdw_rsp_q_in       = qla4xxx_rd_shdw_rsp_q_in,
4042 	.get_sys_info           = qla4xxx_get_sys_info,
4043 	.queue_mailbox_command	= qla4xxx_queue_mbox_cmd,
4044 	.process_mailbox_interrupt = qla4xxx_process_mbox_intr,
4045 };
4046 
4047 static struct isp_operations qla4_82xx_isp_ops = {
4048 	.iospace_config         = qla4_8xxx_iospace_config,
4049 	.pci_config             = qla4_8xxx_pci_config,
4050 	.disable_intrs          = qla4_82xx_disable_intrs,
4051 	.enable_intrs           = qla4_82xx_enable_intrs,
4052 	.start_firmware         = qla4_8xxx_load_risc,
4053 	.restart_firmware	= qla4_82xx_try_start_fw,
4054 	.intr_handler           = qla4_82xx_intr_handler,
4055 	.interrupt_service_routine = qla4_82xx_interrupt_service_routine,
4056 	.need_reset		= qla4_8xxx_need_reset,
4057 	.reset_chip             = qla4_82xx_isp_reset,
4058 	.reset_firmware         = qla4_8xxx_stop_firmware,
4059 	.queue_iocb             = qla4_82xx_queue_iocb,
4060 	.complete_iocb          = qla4_82xx_complete_iocb,
4061 	.rd_shdw_req_q_out      = qla4_82xx_rd_shdw_req_q_out,
4062 	.rd_shdw_rsp_q_in       = qla4_82xx_rd_shdw_rsp_q_in,
4063 	.get_sys_info           = qla4_8xxx_get_sys_info,
4064 	.rd_reg_direct		= qla4_82xx_rd_32,
4065 	.wr_reg_direct		= qla4_82xx_wr_32,
4066 	.rd_reg_indirect	= qla4_82xx_md_rd_32,
4067 	.wr_reg_indirect	= qla4_82xx_md_wr_32,
4068 	.idc_lock		= qla4_82xx_idc_lock,
4069 	.idc_unlock		= qla4_82xx_idc_unlock,
4070 	.rom_lock_recovery	= qla4_82xx_rom_lock_recovery,
4071 	.queue_mailbox_command	= qla4_82xx_queue_mbox_cmd,
4072 	.process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
4073 };
4074 
4075 static struct isp_operations qla4_83xx_isp_ops = {
4076 	.iospace_config		= qla4_8xxx_iospace_config,
4077 	.pci_config		= qla4_8xxx_pci_config,
4078 	.disable_intrs		= qla4_83xx_disable_intrs,
4079 	.enable_intrs		= qla4_83xx_enable_intrs,
4080 	.start_firmware		= qla4_8xxx_load_risc,
4081 	.restart_firmware	= qla4_83xx_start_firmware,
4082 	.intr_handler		= qla4_83xx_intr_handler,
4083 	.interrupt_service_routine = qla4_83xx_interrupt_service_routine,
4084 	.need_reset		= qla4_8xxx_need_reset,
4085 	.reset_chip		= qla4_83xx_isp_reset,
4086 	.reset_firmware		= qla4_8xxx_stop_firmware,
4087 	.queue_iocb		= qla4_83xx_queue_iocb,
4088 	.complete_iocb		= qla4_83xx_complete_iocb,
4089 	.rd_shdw_req_q_out	= qla4xxx_rd_shdw_req_q_out,
4090 	.rd_shdw_rsp_q_in	= qla4xxx_rd_shdw_rsp_q_in,
4091 	.get_sys_info		= qla4_8xxx_get_sys_info,
4092 	.rd_reg_direct		= qla4_83xx_rd_reg,
4093 	.wr_reg_direct		= qla4_83xx_wr_reg,
4094 	.rd_reg_indirect	= qla4_83xx_rd_reg_indirect,
4095 	.wr_reg_indirect	= qla4_83xx_wr_reg_indirect,
4096 	.idc_lock		= qla4_83xx_drv_lock,
4097 	.idc_unlock		= qla4_83xx_drv_unlock,
4098 	.rom_lock_recovery	= qla4_83xx_rom_lock_recovery,
4099 	.queue_mailbox_command	= qla4_83xx_queue_mbox_cmd,
4100 	.process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
4101 };
4102 
4103 uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
4104 {
4105 	return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
4106 }
4107 
4108 uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
4109 {
4110 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
4111 }
4112 
4113 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
4114 {
4115 	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
4116 }
4117 
4118 uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
4119 {
4120 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
4121 }
4122 
4123 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
4124 {
4125 	struct scsi_qla_host *ha = data;
4126 	char *str = buf;
4127 	int rc;
4128 
4129 	switch (type) {
4130 	case ISCSI_BOOT_ETH_FLAGS:
4131 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4132 		break;
4133 	case ISCSI_BOOT_ETH_INDEX:
4134 		rc = sprintf(str, "0\n");
4135 		break;
4136 	case ISCSI_BOOT_ETH_MAC:
4137 		rc = sysfs_format_mac(str, ha->my_mac,
4138 				      MAC_ADDR_LEN);
4139 		break;
4140 	default:
4141 		rc = -ENOSYS;
4142 		break;
4143 	}
4144 	return rc;
4145 }
4146 
4147 static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
4148 {
4149 	int rc;
4150 
4151 	switch (type) {
4152 	case ISCSI_BOOT_ETH_FLAGS:
4153 	case ISCSI_BOOT_ETH_MAC:
4154 	case ISCSI_BOOT_ETH_INDEX:
4155 		rc = S_IRUGO;
4156 		break;
4157 	default:
4158 		rc = 0;
4159 		break;
4160 	}
4161 	return rc;
4162 }
4163 
4164 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
4165 {
4166 	struct scsi_qla_host *ha = data;
4167 	char *str = buf;
4168 	int rc;
4169 
4170 	switch (type) {
4171 	case ISCSI_BOOT_INI_INITIATOR_NAME:
4172 		rc = sprintf(str, "%s\n", ha->name_string);
4173 		break;
4174 	default:
4175 		rc = -ENOSYS;
4176 		break;
4177 	}
4178 	return rc;
4179 }
4180 
4181 static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
4182 {
4183 	int rc;
4184 
4185 	switch (type) {
4186 	case ISCSI_BOOT_INI_INITIATOR_NAME:
4187 		rc = S_IRUGO;
4188 		break;
4189 	default:
4190 		rc = 0;
4191 		break;
4192 	}
4193 	return rc;
4194 }
4195 
4196 static ssize_t
4197 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
4198 			   char *buf)
4199 {
4200 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4201 	char *str = buf;
4202 	int rc;
4203 
4204 	switch (type) {
4205 	case ISCSI_BOOT_TGT_NAME:
4206 		rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
4207 		break;
4208 	case ISCSI_BOOT_TGT_IP_ADDR:
4209 		if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
4210 			rc = sprintf(buf, "%pI4\n",
4211 				     &boot_conn->dest_ipaddr.ip_address);
4212 		else
4213 			rc = sprintf(str, "%pI6\n",
4214 				     &boot_conn->dest_ipaddr.ip_address);
4215 		break;
4216 	case ISCSI_BOOT_TGT_PORT:
4217 		rc = sprintf(str, "%d\n", boot_conn->dest_port);
4218 		break;
4219 	case ISCSI_BOOT_TGT_CHAP_NAME:
4220 		rc = sprintf(str,  "%.*s\n",
4221 			     boot_conn->chap.target_chap_name_length,
4222 			     (char *)&boot_conn->chap.target_chap_name);
4223 		break;
4224 	case ISCSI_BOOT_TGT_CHAP_SECRET:
4225 		rc = sprintf(str,  "%.*s\n",
4226 			     boot_conn->chap.target_secret_length,
4227 			     (char *)&boot_conn->chap.target_secret);
4228 		break;
4229 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4230 		rc = sprintf(str,  "%.*s\n",
4231 			     boot_conn->chap.intr_chap_name_length,
4232 			     (char *)&boot_conn->chap.intr_chap_name);
4233 		break;
4234 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4235 		rc = sprintf(str,  "%.*s\n",
4236 			     boot_conn->chap.intr_secret_length,
4237 			     (char *)&boot_conn->chap.intr_secret);
4238 		break;
4239 	case ISCSI_BOOT_TGT_FLAGS:
4240 		rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
4241 		break;
4242 	case ISCSI_BOOT_TGT_NIC_ASSOC:
4243 		rc = sprintf(str, "0\n");
4244 		break;
4245 	default:
4246 		rc = -ENOSYS;
4247 		break;
4248 	}
4249 	return rc;
4250 }
4251 
4252 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
4253 {
4254 	struct scsi_qla_host *ha = data;
4255 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
4256 
4257 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
4258 }
4259 
4260 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
4261 {
4262 	struct scsi_qla_host *ha = data;
4263 	struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
4264 
4265 	return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
4266 }
4267 
4268 static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
4269 {
4270 	int rc;
4271 
4272 	switch (type) {
4273 	case ISCSI_BOOT_TGT_NAME:
4274 	case ISCSI_BOOT_TGT_IP_ADDR:
4275 	case ISCSI_BOOT_TGT_PORT:
4276 	case ISCSI_BOOT_TGT_CHAP_NAME:
4277 	case ISCSI_BOOT_TGT_CHAP_SECRET:
4278 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
4279 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
4280 	case ISCSI_BOOT_TGT_NIC_ASSOC:
4281 	case ISCSI_BOOT_TGT_FLAGS:
4282 		rc = S_IRUGO;
4283 		break;
4284 	default:
4285 		rc = 0;
4286 		break;
4287 	}
4288 	return rc;
4289 }
4290 
4291 static void qla4xxx_boot_release(void *data)
4292 {
4293 	struct scsi_qla_host *ha = data;
4294 
4295 	scsi_host_put(ha->host);
4296 }
4297 
4298 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
4299 {
4300 	dma_addr_t buf_dma;
4301 	uint32_t addr, pri_addr, sec_addr;
4302 	uint32_t offset;
4303 	uint16_t func_num;
4304 	uint8_t val;
4305 	uint8_t *buf = NULL;
4306 	size_t size = 13 * sizeof(uint8_t);
4307 	int ret = QLA_SUCCESS;
4308 
4309 	func_num = PCI_FUNC(ha->pdev->devfn);
4310 
4311 	ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
4312 		   __func__, ha->pdev->device, func_num);
4313 
4314 	if (is_qla40XX(ha)) {
4315 		if (func_num == 1) {
4316 			addr = NVRAM_PORT0_BOOT_MODE;
4317 			pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
4318 			sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
4319 		} else if (func_num == 3) {
4320 			addr = NVRAM_PORT1_BOOT_MODE;
4321 			pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
4322 			sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
4323 		} else {
4324 			ret = QLA_ERROR;
4325 			goto exit_boot_info;
4326 		}
4327 
4328 		/* Check Boot Mode */
4329 		val = rd_nvram_byte(ha, addr);
4330 		if (!(val & 0x07)) {
4331 			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
4332 					  "options : 0x%x\n", __func__, val));
4333 			ret = QLA_ERROR;
4334 			goto exit_boot_info;
4335 		}
4336 
4337 		/* get primary valid target index */
4338 		val = rd_nvram_byte(ha, pri_addr);
4339 		if (val & BIT_7)
4340 			ddb_index[0] = (val & 0x7f);
4341 
4342 		/* get secondary valid target index */
4343 		val = rd_nvram_byte(ha, sec_addr);
4344 		if (val & BIT_7)
4345 			ddb_index[1] = (val & 0x7f);
4346 
4347 	} else if (is_qla80XX(ha)) {
4348 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
4349 					 &buf_dma, GFP_KERNEL);
4350 		if (!buf) {
4351 			DEBUG2(ql4_printk(KERN_ERR, ha,
4352 					  "%s: Unable to allocate dma buffer\n",
4353 					   __func__));
4354 			ret = QLA_ERROR;
4355 			goto exit_boot_info;
4356 		}
4357 
4358 		if (ha->port_num == 0)
4359 			offset = BOOT_PARAM_OFFSET_PORT0;
4360 		else if (ha->port_num == 1)
4361 			offset = BOOT_PARAM_OFFSET_PORT1;
4362 		else {
4363 			ret = QLA_ERROR;
4364 			goto exit_boot_info_free;
4365 		}
4366 		addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
4367 		       offset;
4368 		if (qla4xxx_get_flash(ha, buf_dma, addr,
4369 				      13 * sizeof(uint8_t)) != QLA_SUCCESS) {
4370 			DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
4371 					  " failed\n", ha->host_no, __func__));
4372 			ret = QLA_ERROR;
4373 			goto exit_boot_info_free;
4374 		}
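		/* Layout of the 13-byte boot parameter block read above, as
		 * interpreted by the checks below: byte 1 holds the firmware
		 * boot options, byte 2 the primary boot target index and
		 * byte 11 the secondary boot target index.
		 */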
4375 		/* Check Boot Mode */
4376 		if (!(buf[1] & 0x07)) {
4377 			DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
4378 					  " : 0x%x\n", buf[1]));
4379 			ret = QLA_ERROR;
4380 			goto exit_boot_info_free;
4381 		}
4382 
4383 		/* get primary valid target index */
4384 		if (buf[2] & BIT_7)
4385 			ddb_index[0] = buf[2] & 0x7f;
4386 
4387 		/* get secondary valid target index */
4388 		if (buf[11] & BIT_7)
4389 			ddb_index[1] = buf[11] & 0x7f;
4390 	} else {
4391 		ret = QLA_ERROR;
4392 		goto exit_boot_info;
4393 	}
4394 
4395 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
4396 			  " target ID %d\n", __func__, ddb_index[0],
4397 			  ddb_index[1]));
4398 
4399 exit_boot_info_free:
4400 	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
4401 exit_boot_info:
4402 	ha->pri_ddb_idx = ddb_index[0];
4403 	ha->sec_ddb_idx = ddb_index[1];
4404 	return ret;
4405 }
4406 
4407 /**
4408  * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
4409  * @ha: pointer to adapter structure
4410  * @username: CHAP username to be returned
4411  * @password: CHAP password to be returned
4412  *
4413  * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
4414  * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
4415  * So, find the first BIDI CHAP entry in the CHAP cache and set it
4416  * in the boot record in sysfs.
4417  **/
4418 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
4419 			    char *password)
4420 {
4421 	int i, ret = -EINVAL;
4422 	int max_chap_entries = 0;
4423 	struct ql4_chap_table *chap_table;
4424 
4425 	if (is_qla80XX(ha))
4426 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
4427 						sizeof(struct ql4_chap_table);
4428 	else
4429 		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
4430 
4431 	if (!ha->chap_list) {
4432 		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
4433 		return ret;
4434 	}
4435 
4436 	mutex_lock(&ha->chap_sem);
4437 	for (i = 0; i < max_chap_entries; i++) {
4438 		chap_table = (struct ql4_chap_table *)ha->chap_list + i;
4439 		if (chap_table->cookie !=
4440 		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
4441 			continue;
4442 		}
4443 
4444 		if (chap_table->flags & BIT_7) /* local */
4445 			continue;
4446 
4447 		if (!(chap_table->flags & BIT_6)) /* Not BIDI */
4448 			continue;
4449 
4450 		strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
4451 		strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
4452 		ret = 0;
4453 		break;
4454 	}
4455 	mutex_unlock(&ha->chap_sem);
4456 
4457 	return ret;
4458 }
4459 
4460 
4461 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
4462 				   struct ql4_boot_session_info *boot_sess,
4463 				   uint16_t ddb_index)
4464 {
4465 	struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
4466 	struct dev_db_entry *fw_ddb_entry;
4467 	dma_addr_t fw_ddb_entry_dma;
4468 	uint16_t idx;
4469 	uint16_t options;
4470 	int ret = QLA_SUCCESS;
4471 
4472 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4473 					  &fw_ddb_entry_dma, GFP_KERNEL);
4474 	if (!fw_ddb_entry) {
4475 		DEBUG2(ql4_printk(KERN_ERR, ha,
4476 				  "%s: Unable to allocate dma buffer.\n",
4477 				  __func__));
4478 		ret = QLA_ERROR;
4479 		return ret;
4480 	}
4481 
4482 	if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
4483 				   fw_ddb_entry_dma, ddb_index)) {
4484 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
4485 				  "index [%d]\n", __func__, ddb_index));
4486 		ret = QLA_ERROR;
4487 		goto exit_boot_target;
4488 	}
4489 
4490 	/* Update target name and IP from DDB */
4491 	memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
4492 	       min(sizeof(boot_sess->target_name),
4493 		   sizeof(fw_ddb_entry->iscsi_name)));
4494 
4495 	options = le16_to_cpu(fw_ddb_entry->options);
4496 	if (options & DDB_OPT_IPV6_DEVICE) {
4497 		memcpy(&boot_conn->dest_ipaddr.ip_address,
4498 		       &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
4499 	} else {
4500 		boot_conn->dest_ipaddr.ip_type = 0x1;
4501 		memcpy(&boot_conn->dest_ipaddr.ip_address,
4502 		       &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
4503 	}
4504 
4505 	boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
4506 
4507 	/* update chap information */
4508 	idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
4509 
4510 	if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options))	{
4511 
4512 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
4513 
4514 		ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
4515 				       target_chap_name,
4516 				       (char *)&boot_conn->chap.target_secret,
4517 				       idx);
4518 		if (ret) {
4519 			ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
4520 			ret = QLA_ERROR;
4521 			goto exit_boot_target;
4522 		}
4523 
4524 		boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4525 		boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4526 	}
4527 
4528 	if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
4529 
4530 		DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
4531 
4532 		ret = qla4xxx_get_bidi_chap(ha,
4533 				    (char *)&boot_conn->chap.intr_chap_name,
4534 				    (char *)&boot_conn->chap.intr_secret);
4535 
4536 		if (ret) {
4537 			ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
4538 			ret = QLA_ERROR;
4539 			goto exit_boot_target;
4540 		}
4541 
4542 		boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
4543 		boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
4544 	}
4545 
4546 exit_boot_target:
4547 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
4548 			  fw_ddb_entry, fw_ddb_entry_dma);
4549 	return ret;
4550 }
4551 
4552 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
4553 {
4554 	uint16_t ddb_index[2];
4555 	int ret = QLA_ERROR;
4556 	int rval;
4557 
4558 	memset(ddb_index, 0, sizeof(ddb_index));
4559 	ddb_index[0] = 0xffff;
4560 	ddb_index[1] = 0xffff;
4561 	ret = get_fw_boot_info(ha, ddb_index);
4562 	if (ret != QLA_SUCCESS) {
4563 		DEBUG2(ql4_printk(KERN_INFO, ha,
4564 				"%s: No boot target configured.\n", __func__));
4565 		return ret;
4566 	}
4567 
4568 	if (ql4xdisablesysfsboot)
4569 		return QLA_SUCCESS;
4570 
4571 	if (ddb_index[0] == 0xffff)
4572 		goto sec_target;
4573 
4574 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
4575 				      ddb_index[0]);
4576 	if (rval != QLA_SUCCESS) {
4577 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
4578 				  "configured\n", __func__));
4579 	} else
4580 		ret = QLA_SUCCESS;
4581 
4582 sec_target:
4583 	if (ddb_index[1] == 0xffff)
4584 		goto exit_get_boot_info;
4585 
4586 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
4587 				      ddb_index[1]);
4588 	if (rval != QLA_SUCCESS) {
4589 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
4590 				  " configured\n", __func__));
4591 	} else
4592 		ret = QLA_SUCCESS;
4593 
4594 exit_get_boot_info:
4595 	return ret;
4596 }
4597 
4598 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
4599 {
4600 	struct iscsi_boot_kobj *boot_kobj;
4601 
4602 	if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
4603 		return QLA_ERROR;
4604 
4605 	if (ql4xdisablesysfsboot) {
4606 		ql4_printk(KERN_INFO, ha,
4607 			   "%s: syfsboot disabled - driver will trigger login "
4608 			   "and publish session for discovery .\n", __func__);
4609 		return QLA_SUCCESS;
4610 	}
4611 
4612 
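	/* Each boot kobject created below takes its own reference on the
	 * Scsi_Host via scsi_host_get(); qla4xxx_boot_release() drops that
	 * reference when the kobject is released.
	 */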
4613 	ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
4614 	if (!ha->boot_kset)
4615 		goto kset_free;
4616 
4617 	if (!scsi_host_get(ha->host))
4618 		goto kset_free;
4619 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
4620 					     qla4xxx_show_boot_tgt_pri_info,
4621 					     qla4xxx_tgt_get_attr_visibility,
4622 					     qla4xxx_boot_release);
4623 	if (!boot_kobj)
4624 		goto put_host;
4625 
4626 	if (!scsi_host_get(ha->host))
4627 		goto kset_free;
4628 	boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
4629 					     qla4xxx_show_boot_tgt_sec_info,
4630 					     qla4xxx_tgt_get_attr_visibility,
4631 					     qla4xxx_boot_release);
4632 	if (!boot_kobj)
4633 		goto put_host;
4634 
4635 	if (!scsi_host_get(ha->host))
4636 		goto kset_free;
4637 	boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
4638 					       qla4xxx_show_boot_ini_info,
4639 					       qla4xxx_ini_get_attr_visibility,
4640 					       qla4xxx_boot_release);
4641 	if (!boot_kobj)
4642 		goto put_host;
4643 
4644 	if (!scsi_host_get(ha->host))
4645 		goto kset_free;
4646 	boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
4647 					       qla4xxx_show_boot_eth_info,
4648 					       qla4xxx_eth_get_attr_visibility,
4649 					       qla4xxx_boot_release);
4650 	if (!boot_kobj)
4651 		goto put_host;
4652 
4653 	return QLA_SUCCESS;
4654 
4655 put_host:
4656 	scsi_host_put(ha->host);
4657 kset_free:
4658 	iscsi_boot_destroy_kset(ha->boot_kset);
4659 	return -ENOMEM;
4660 }
4661 
4662 
4663 /**
4664  * qla4xxx_create_chap_list - Create CHAP list from FLASH
4665  * @ha: pointer to adapter structure
4666  *
4667  * Read flash and build a list of CHAP entries. During login, when a CHAP
4668  * entry is received, it is checked against this list. If the entry exists,
4669  * the CHAP entry index is set in the DDB. If the CHAP entry does not exist
4670  * in this list, a new entry is added to the CHAP table in FLASH and the
4671  * index obtained is used in the DDB.
4672  **/
4673 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
4674 {
4675 	int rval = 0;
4676 	uint8_t *chap_flash_data = NULL;
4677 	uint32_t offset;
4678 	dma_addr_t chap_dma;
4679 	uint32_t chap_size = 0;
4680 
4681 	if (is_qla40XX(ha))
4682 		chap_size = MAX_CHAP_ENTRIES_40XX  *
4683 					sizeof(struct ql4_chap_table);
4684 	else	/* A single region contains CHAP info for both
4685 		 * ports and is divided in half, one half per port.
4686 		 */
4687 		chap_size = ha->hw.flt_chap_size / 2;
4688 
4689 	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
4690 					  &chap_dma, GFP_KERNEL);
4691 	if (!chap_flash_data) {
4692 		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
4693 		return;
4694 	}
4695 	if (is_qla40XX(ha))
4696 		offset = FLASH_CHAP_OFFSET;
4697 	else {
4698 		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
4699 		if (ha->port_num == 1)
4700 			offset += chap_size;
4701 	}
4702 
4703 	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
4704 	if (rval != QLA_SUCCESS)
4705 		goto exit_chap_list;
4706 
4707 	if (ha->chap_list == NULL)
4708 		ha->chap_list = vmalloc(chap_size);
4709 	if (ha->chap_list == NULL) {
4710 		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
4711 		goto exit_chap_list;
4712 	}
4713 
4714 	memcpy(ha->chap_list, chap_flash_data, chap_size);
4715 
4716 exit_chap_list:
4717 	dma_free_coherent(&ha->pdev->dev, chap_size,
4718 			chap_flash_data, chap_dma);
4719 }
4720 
4721 static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
4722 				  struct ql4_tuple_ddb *tddb)
4723 {
4724 	struct scsi_qla_host *ha;
4725 	struct iscsi_cls_session *cls_sess;
4726 	struct iscsi_cls_conn *cls_conn;
4727 	struct iscsi_session *sess;
4728 	struct iscsi_conn *conn;
4729 
4730 	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
4731 	ha = ddb_entry->ha;
4732 	cls_sess = ddb_entry->sess;
4733 	sess = cls_sess->dd_data;
4734 	cls_conn = ddb_entry->conn;
4735 	conn = cls_conn->dd_data;
4736 
4737 	tddb->tpgt = sess->tpgt;
4738 	tddb->port = conn->persistent_port;
4739 	strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
4740 	strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
4741 }
4742 
4743 static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
4744 				      struct ql4_tuple_ddb *tddb,
4745 				      uint8_t *flash_isid)
4746 {
4747 	uint16_t options = 0;
4748 
4749 	tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
4750 	memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
4751 	       min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
4752 
4753 	options = le16_to_cpu(fw_ddb_entry->options);
4754 	if (options & DDB_OPT_IPV6_DEVICE)
4755 		sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
4756 	else
4757 		sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
4758 
4759 	tddb->port = le16_to_cpu(fw_ddb_entry->port);
4760 
4761 	if (flash_isid == NULL)
4762 		memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
4763 		       sizeof(tddb->isid));
4764 	else
4765 		memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
4766 }
4767 
4768 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
4769 				     struct ql4_tuple_ddb *old_tddb,
4770 				     struct ql4_tuple_ddb *new_tddb,
4771 				     uint8_t is_isid_compare)
4772 {
4773 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4774 		return QLA_ERROR;
4775 
4776 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
4777 		return QLA_ERROR;
4778 
4779 	if (old_tddb->port != new_tddb->port)
4780 		return QLA_ERROR;
4781 
4782 	/* For multi-session targets the driver generates the ISID, so do
4783 	 * not compare ISIDs in the reset path, since that would compare a
4784 	 * driver-generated ISID against a firmware-generated one. This
4785 	 * could lead to duplicate DDBs being added to the list, as the
4786 	 * driver-generated ISID would never match the firmware one.
4787 	 */
4788 	if (is_isid_compare) {
4789 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
4790 			"%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
4791 			__func__, old_tddb->isid[5], old_tddb->isid[4],
4792 			old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
4793 			old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
4794 			new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
4795 			new_tddb->isid[0]));
4796 
4797 		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4798 			   sizeof(old_tddb->isid)))
4799 			return QLA_ERROR;
4800 	}
4801 
4802 	DEBUG2(ql4_printk(KERN_INFO, ha,
4803 			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
4804 			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
4805 			  old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
4806 			  new_tddb->ip_addr, new_tddb->iscsi_name));
4807 
4808 	return QLA_SUCCESS;
4809 }
4810 
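/**
 * qla4xxx_is_session_exists - check if a session already exists for a ddb
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_entry: firmware ddb entry to look for
 *
 * Returns QLA_SUCCESS if an active session matches the entry's tuple.
 **/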
4811 static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
4812 				     struct dev_db_entry *fw_ddb_entry)
4813 {
4814 	struct ddb_entry *ddb_entry;
4815 	struct ql4_tuple_ddb *fw_tddb = NULL;
4816 	struct ql4_tuple_ddb *tmp_tddb = NULL;
4817 	int idx;
4818 	int ret = QLA_ERROR;
4819 
4820 	fw_tddb = vzalloc(sizeof(*fw_tddb));
4821 	if (!fw_tddb) {
4822 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4823 				  "Memory Allocation failed.\n"));
4824 		ret = QLA_SUCCESS;
4825 		goto exit_check;
4826 	}
4827 
4828 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4829 	if (!tmp_tddb) {
4830 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4831 				  "Memory Allocation failed.\n"));
4832 		ret = QLA_SUCCESS;
4833 		goto exit_check;
4834 	}
4835 
4836 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4837 
4838 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
4839 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
4840 		if (ddb_entry == NULL)
4841 			continue;
4842 
4843 		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
4844 		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
4845 			ret = QLA_SUCCESS; /* found */
4846 			goto exit_check;
4847 		}
4848 	}
4849 
4850 exit_check:
4851 	if (fw_tddb)
4852 		vfree(fw_tddb);
4853 	if (tmp_tddb)
4854 		vfree(tmp_tddb);
4855 	return ret;
4856 }
4857 
4858 /**
4859  * qla4xxx_check_existing_isid - check if a target with the same isid
4860  *				 exists in the target list
4861  * @list_nt: list of targets
4862  * @isid: isid to check
4863  *
4864  * Returns QLA_SUCCESS if a target with the same isid exists in the list.
4865  **/
4866 static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
4867 {
4868 	struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
4869 	struct dev_db_entry *fw_ddb_entry;
4870 
4871 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4872 		fw_ddb_entry = &nt_ddb_idx->fw_ddb;
4873 
4874 		if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
4875 			   sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
4876 			return QLA_SUCCESS;
4877 		}
4878 	}
4879 	return QLA_ERROR;
4880 }
4881 
4882 /**
4883  * qla4xxx_update_isid - compare ddbs and update the isid
4884  * @ha: Pointer to host adapter structure.
4885  * @list_nt: list of nt targets
4886  * @fw_ddb_entry: firmware ddb entry
4887  *
4888  * This routine updates the isid if ddbs have the same iqn and isid but
4889  * different IP addresses.
4890  * Returns QLA_SUCCESS if the isid is updated.
4891  **/
4892 static int qla4xxx_update_isid(struct scsi_qla_host *ha,
4893 			       struct list_head *list_nt,
4894 			       struct dev_db_entry *fw_ddb_entry)
4895 {
4896 	uint8_t base_value, i;
4897 
4898 	base_value = fw_ddb_entry->isid[1] & 0x1f;
4899 	for (i = 0; i < 8; i++) {
4900 		fw_ddb_entry->isid[1] = (base_value | (i << 5));
4901 		if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4902 			break;
4903 	}
4904 
4905 	if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
4906 		return QLA_ERROR;
4907 
4908 	return QLA_SUCCESS;
4909 }
4910 
4911 /**
4912  * qla4xxx_should_update_isid - check if the isid needs to be updated
4913  * @ha: Pointer to host adapter structure.
4914  * @old_tddb: ddb tuple
4915  * @new_tddb: ddb tuple
4916  *
4917  * Returns QLA_SUCCESS if the tuples have the same iqn and isid but a
4918  * different IP address or port.
4919  **/
4920 static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
4921 				      struct ql4_tuple_ddb *old_tddb,
4922 				      struct ql4_tuple_ddb *new_tddb)
4923 {
4924 	if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
4925 		/* Same ip */
4926 		if (old_tddb->port == new_tddb->port)
4927 			return QLA_ERROR;
4928 	}
4929 
4930 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
4931 		/* different iqn */
4932 		return QLA_ERROR;
4933 
4934 	if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
4935 		   sizeof(old_tddb->isid)))
4936 		/* different isid */
4937 		return QLA_ERROR;
4938 
4939 	return QLA_SUCCESS;
4940 }
4941 
4942 /**
4943  * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
4944  * @ha: Pointer to host adapter structure.
4945  * @list_nt: list of nt targets.
4946  * @fw_ddb_entry: firmware ddb entry.
4947  *
4948  * This routine checks if fw_ddb_entry already exists in list_nt to avoid
4949  * adding a duplicate ddb to the list.
4950  * Returns QLA_SUCCESS if a duplicate ddb exists in list_nt.
4951  * Note: This function also updates the isid of the DDB if required.
4952  **/
4953 
4954 static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
4955 				       struct list_head *list_nt,
4956 				       struct dev_db_entry *fw_ddb_entry)
4957 {
4958 	struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
4959 	struct ql4_tuple_ddb *fw_tddb = NULL;
4960 	struct ql4_tuple_ddb *tmp_tddb = NULL;
4961 	int rval, ret = QLA_ERROR;
4962 
4963 	fw_tddb = vzalloc(sizeof(*fw_tddb));
4964 	if (!fw_tddb) {
4965 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4966 				  "Memory Allocation failed.\n"));
4967 		ret = QLA_SUCCESS;
4968 		goto exit_check;
4969 	}
4970 
4971 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
4972 	if (!tmp_tddb) {
4973 		DEBUG2(ql4_printk(KERN_WARNING, ha,
4974 				  "Memory Allocation failed.\n"));
4975 		ret = QLA_SUCCESS;
4976 		goto exit_check;
4977 	}
4978 
4979 	qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
4980 
4981 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4982 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
4983 					  nt_ddb_idx->flash_isid);
4984 		ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
4985 		/* found duplicate ddb */
4986 		if (ret == QLA_SUCCESS)
4987 			goto exit_check;
4988 	}
4989 
4990 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
4991 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
4992 
4993 		ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
4994 		if (ret == QLA_SUCCESS) {
4995 			rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
4996 			if (rval == QLA_SUCCESS)
4997 				ret = QLA_ERROR;
4998 			else
4999 				ret = QLA_SUCCESS;
5000 
5001 			goto exit_check;
5002 		}
5003 	}
5004 
5005 exit_check:
5006 	if (fw_tddb)
5007 		vfree(fw_tddb);
5008 	if (tmp_tddb)
5009 		vfree(tmp_tddb);
5010 	return ret;
5011 }
5012 
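/* Free every qla_ddb_index element queued on the given list. */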
5013 static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
5014 {
5015 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
5016 
5017 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5018 		list_del_init(&ddb_idx->list);
5019 		vfree(ddb_idx);
5020 	}
5021 }
5022 
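/*
 * Build a sockaddr from the IP address and port in the firmware ddb entry
 * and open an iSCSI endpoint to it (used to expose ep attributes in sysfs).
 */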
5023 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
5024 					struct dev_db_entry *fw_ddb_entry)
5025 {
5026 	struct iscsi_endpoint *ep;
5027 	struct sockaddr_in *addr;
5028 	struct sockaddr_in6 *addr6;
5029 	struct sockaddr *t_addr;
5030 	struct sockaddr_storage *dst_addr;
5031 	char *ip;
5032 
5033 	/* TODO: the iscsi_endpoint needs to be destroyed on unload */
5034 	dst_addr = vmalloc(sizeof(*dst_addr));
5035 	if (!dst_addr)
5036 		return NULL;
5037 
5038 	if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
5039 		t_addr = (struct sockaddr *)dst_addr;
5040 		t_addr->sa_family = AF_INET6;
5041 		addr6 = (struct sockaddr_in6 *)dst_addr;
5042 		ip = (char *)&addr6->sin6_addr;
5043 		memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
5044 		addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
5045 
5046 	} else {
5047 		t_addr = (struct sockaddr *)dst_addr;
5048 		t_addr->sa_family = AF_INET;
5049 		addr = (struct sockaddr_in *)dst_addr;
5050 		ip = (char *)&addr->sin_addr;
5051 		memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
5052 		addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
5053 	}
5054 
5055 	ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
5056 	vfree(dst_addr);
5057 	return ep;
5058 }
5059 
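/*
 * Returns QLA_ERROR for boot-target indices when boot targets are exported
 * through sysfs (ql4xdisablesysfsboot == 0), so that they are skipped while
 * building the flash DDB lists; QLA_SUCCESS otherwise.
 */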
5060 static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
5061 {
5062 	if (ql4xdisablesysfsboot)
5063 		return QLA_SUCCESS;
5064 	if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
5065 		return QLA_ERROR;
5066 	return QLA_SUCCESS;
5067 }
5068 
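/*
 * Initialize driver-side state for a flash DDB: callbacks, relogin
 * counters and timeouts, and the boot-target flag when applicable.
 */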
5069 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
5070 					  struct ddb_entry *ddb_entry,
5071 					  uint16_t idx)
5072 {
5073 	uint16_t def_timeout;
5074 
5075 	ddb_entry->ddb_type = FLASH_DDB;
5076 	ddb_entry->fw_ddb_index = INVALID_ENTRY;
5077 	ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
5078 	ddb_entry->ha = ha;
5079 	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
5080 	ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
5081 
5082 	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
5083 	atomic_set(&ddb_entry->relogin_timer, 0);
5084 	atomic_set(&ddb_entry->relogin_retry_count, 0);
5085 	def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
5086 	ddb_entry->default_relogin_timeout =
5087 		(def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
5088 		def_timeout : LOGIN_TOV;
5089 	ddb_entry->default_time2wait =
5090 		le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
5091 
5092 	if (ql4xdisablesysfsboot &&
5093 	    (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
5094 		set_bit(DF_BOOT_TGT, &ddb_entry->flags);
5095 }
5096 
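/*
 * Poll the address state of up to four IP interfaces until each reaches a
 * settled state (or becomes unreachable) or IP_CONFIG_TOV expires.
 */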
5097 static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
5098 {
5099 	uint32_t idx = 0;
5100 	uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
5101 	uint32_t sts[MBOX_REG_COUNT];
5102 	uint32_t ip_state;
5103 	unsigned long wtime;
5104 	int ret;
5105 
5106 	wtime = jiffies + (HZ * IP_CONFIG_TOV);
5107 	do {
5108 		for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
5109 			if (ip_idx[idx] == -1)
5110 				continue;
5111 
5112 			ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
5113 
5114 			if (ret == QLA_ERROR) {
5115 				ip_idx[idx] = -1;
5116 				continue;
5117 			}
5118 
5119 			ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
5120 
5121 			DEBUG2(ql4_printk(KERN_INFO, ha,
5122 					  "Waiting for IP state for idx = %d, state = 0x%x\n",
5123 					  ip_idx[idx], ip_state));
5124 			if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
5125 			    ip_state == IP_ADDRSTATE_INVALID ||
5126 			    ip_state == IP_ADDRSTATE_PREFERRED ||
5127 			    ip_state == IP_ADDRSTATE_DEPRICATED ||
5128 			    ip_state == IP_ADDRSTATE_DISABLING)
5129 				ip_idx[idx] = -1;
5130 		}
5131 
5132 		/* Break if all IP states checked */
5133 		if ((ip_idx[0] == -1) &&
5134 		    (ip_idx[1] == -1) &&
5135 		    (ip_idx[2] == -1) &&
5136 		    (ip_idx[3] == -1))
5137 			break;
5138 		schedule_timeout_uninterruptible(HZ);
5139 	} while (time_after(wtime, jiffies));
5140 }
5141 
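/*
 * Walk the firmware DDBs and queue the SendTargets entries (those with an
 * empty iscsi_name) on list_st for later discovery processing.
 */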
5142 static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
5143 				  struct list_head *list_st)
5144 {
5145 	struct qla_ddb_index  *st_ddb_idx;
5146 	int max_ddbs;
5147 	int fw_idx_size;
5148 	struct dev_db_entry *fw_ddb_entry;
5149 	dma_addr_t fw_ddb_dma;
5150 	int ret;
5151 	uint32_t idx = 0, next_idx = 0;
5152 	uint32_t state = 0, conn_err = 0;
5153 	uint16_t conn_id = 0;
5154 
5155 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5156 				      &fw_ddb_dma);
5157 	if (fw_ddb_entry == NULL) {
5158 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5159 		goto exit_st_list;
5160 	}
5161 
5162 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5163 				     MAX_DEV_DB_ENTRIES;
5164 	fw_idx_size = sizeof(struct qla_ddb_index);
5165 
5166 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
5167 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5168 					      NULL, &next_idx, &state,
5169 					      &conn_err, NULL, &conn_id);
5170 		if (ret == QLA_ERROR)
5171 			break;
5172 
5173 		/* Ignore DDB if invalid state (unassigned) */
5174 		if (state == DDB_DS_UNASSIGNED)
5175 			goto continue_next_st;
5176 
5177 		/* Check if ST, add to the list_st */
5178 		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
5179 			goto continue_next_st;
5180 
5181 		st_ddb_idx = vzalloc(fw_idx_size);
5182 		if (!st_ddb_idx)
5183 			break;
5184 
5185 		st_ddb_idx->fw_ddb_idx = idx;
5186 
5187 		list_add_tail(&st_ddb_idx->list, list_st);
5188 continue_next_st:
5189 		if (next_idx == 0)
5190 			break;
5191 	}
5192 
5193 exit_st_list:
5194 	if (fw_ddb_entry)
5195 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5196 }
5197 
5198 /**
5199  * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
5200  * @ha: pointer to adapter structure
5201  * @list_ddb: List from which failed ddb to be removed
5202  *
5203  * Iterate over the list of DDBs and find and remove DDBs that are either in
5204  * no connection active state or failed state
5205  **/
5206 static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
5207 				      struct list_head *list_ddb)
5208 {
5209 	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
5210 	uint32_t next_idx = 0;
5211 	uint32_t state = 0, conn_err = 0;
5212 	int ret;
5213 
5214 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
5215 		ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
5216 					      NULL, 0, NULL, &next_idx, &state,
5217 					      &conn_err, NULL, NULL);
5218 		if (ret == QLA_ERROR)
5219 			continue;
5220 
5221 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
5222 		    state == DDB_DS_SESSION_FAILED) {
5223 			list_del_init(&ddb_idx->list);
5224 			vfree(ddb_idx);
5225 		}
5226 	}
5227 }
5228 
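/*
 * Create the iSCSI session and connection objects for a flash DDB entry
 * and, on RESET_ADAPTER, flag the session for an immediate relogin.
 */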
5229 static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
5230 				   struct dev_db_entry *fw_ddb_entry,
5231 				   int is_reset, uint16_t idx)
5232 {
5233 	struct iscsi_cls_session *cls_sess;
5234 	struct iscsi_session *sess;
5235 	struct iscsi_cls_conn *cls_conn;
5236 	struct iscsi_endpoint *ep;
5237 	uint16_t cmds_max = 32;
5238 	uint16_t conn_id = 0;
5239 	uint32_t initial_cmdsn = 0;
5240 	int ret = QLA_SUCCESS;
5241 
5242 	struct ddb_entry *ddb_entry = NULL;
5243 
5244 	/* Create the session object with INVALID_ENTRY; the target_id
5245 	 * gets set when we issue the login.
5246 	 */
5247 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
5248 				       cmds_max, sizeof(struct ddb_entry),
5249 				       sizeof(struct ql4_task_data),
5250 				       initial_cmdsn, INVALID_ENTRY);
5251 	if (!cls_sess) {
5252 		ret = QLA_ERROR;
5253 		goto exit_setup;
5254 	}
5255 
5256 	/*
5257 	 * iscsi_session_setup takes a module reference that would prevent
5258 	 * driver unload, so call module_put to drop that reference.
5259 	 **/
5260 	module_put(qla4xxx_iscsi_transport.owner);
5261 	sess = cls_sess->dd_data;
5262 	ddb_entry = sess->dd_data;
5263 	ddb_entry->sess = cls_sess;
5264 
5265 	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
5266 	memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
5267 	       sizeof(struct dev_db_entry));
5268 
5269 	qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
5270 
5271 	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
5272 
5273 	if (!cls_conn) {
5274 		ret = QLA_ERROR;
5275 		goto exit_setup;
5276 	}
5277 
5278 	ddb_entry->conn = cls_conn;
5279 
5280 	/* Setup ep, for displaying attributes in sysfs */
5281 	ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
5282 	if (ep) {
5283 		ep->conn = cls_conn;
5284 		cls_conn->ep = ep;
5285 	} else {
5286 		DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
5287 		ret = QLA_ERROR;
5288 		goto exit_setup;
5289 	}
5290 
5291 	/* Update sess/conn params */
5292 	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
5293 
5294 	if (is_reset == RESET_ADAPTER) {
5295 		iscsi_block_session(cls_sess);
5296 		/* Use the relogin path to discover new devices
5297 		 *  by short-circuiting the logic of setting the
5298 		 *  relogin timer: instead, set the flags to
5299 		 *  initiate login right away.
5300 		 */
5301 		set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
5302 		set_bit(DF_RELOGIN, &ddb_entry->flags);
5303 	}
5304 
5305 exit_setup:
5306 	return ret;
5307 }
5308 
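/*
 * Walk the firmware DDBs and set up sessions for normal (NT) targets,
 * skipping boot targets, SendTargets entries, active sessions and
 * duplicates detected by tuple/ISID comparison.
 */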
5309 static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
5310 				  struct list_head *list_nt, int is_reset)
5311 {
5312 	struct dev_db_entry *fw_ddb_entry;
5313 	dma_addr_t fw_ddb_dma;
5314 	int max_ddbs;
5315 	int fw_idx_size;
5316 	int ret;
5317 	uint32_t idx = 0, next_idx = 0;
5318 	uint32_t state = 0, conn_err = 0;
5319 	uint16_t conn_id = 0;
5320 	struct qla_ddb_index  *nt_ddb_idx;
5321 
5322 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5323 				      &fw_ddb_dma);
5324 	if (fw_ddb_entry == NULL) {
5325 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5326 		goto exit_nt_list;
5327 	}
5328 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5329 				     MAX_DEV_DB_ENTRIES;
5330 	fw_idx_size = sizeof(struct qla_ddb_index);
5331 
5332 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
5333 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5334 					      NULL, &next_idx, &state,
5335 					      &conn_err, NULL, &conn_id);
5336 		if (ret == QLA_ERROR)
5337 			break;
5338 
5339 		if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
5340 			goto continue_next_nt;
5341 
5342 		/* Check if NT, then add it to the list */
5343 		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
5344 			goto continue_next_nt;
5345 
5346 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
5347 		    state == DDB_DS_SESSION_FAILED))
5348 			goto continue_next_nt;
5349 
5350 		DEBUG2(ql4_printk(KERN_INFO, ha,
5351 				  "Adding  DDB to session = 0x%x\n", idx));
5352 		if (is_reset == INIT_ADAPTER) {
5353 			nt_ddb_idx = vmalloc(fw_idx_size);
5354 			if (!nt_ddb_idx)
5355 				break;
5356 
5357 			nt_ddb_idx->fw_ddb_idx = idx;
5358 
5359 			/* Copy the original isid as it may get updated in
5360 			 * qla4xxx_update_isid(). We need the original isid in
5361 			 * qla4xxx_compare_tuple_ddb() to find duplicate
5362 			 * targets. */
5363 			memcpy(&nt_ddb_idx->flash_isid[0],
5364 			       &fw_ddb_entry->isid[0],
5365 			       sizeof(nt_ddb_idx->flash_isid));
5366 
5367 			ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
5368 							  fw_ddb_entry);
5369 			if (ret == QLA_SUCCESS) {
5370 				/* free nt_ddb_idx and do not add to list_nt */
5371 				vfree(nt_ddb_idx);
5372 				goto continue_next_nt;
5373 			}
5374 
5375 			/* Copy updated isid */
5376 			memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
5377 			       sizeof(struct dev_db_entry));
5378 
5379 			list_add_tail(&nt_ddb_idx->list, list_nt);
5380 		} else if (is_reset == RESET_ADAPTER) {
5381 			if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
5382 								QLA_SUCCESS)
5383 				goto continue_next_nt;
5384 		}
5385 
5386 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
5387 		if (ret == QLA_ERROR)
5388 			goto exit_nt_list;
5389 
5390 continue_next_nt:
5391 		if (next_idx == 0)
5392 			break;
5393 	}
5394 
5395 exit_nt_list:
5396 	if (fw_ddb_entry)
5397 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5398 }
5399 
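/*
 * After a SendTargets discovery, set up sessions for newly discovered NT
 * entries that do not already have a session and queue them on list_nt.
 */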
5400 static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
5401 				      struct list_head *list_nt)
5402 {
5403 	struct dev_db_entry *fw_ddb_entry;
5404 	dma_addr_t fw_ddb_dma;
5405 	int max_ddbs;
5406 	int fw_idx_size;
5407 	int ret;
5408 	uint32_t idx = 0, next_idx = 0;
5409 	uint32_t state = 0, conn_err = 0;
5410 	uint16_t conn_id = 0;
5411 	struct qla_ddb_index  *nt_ddb_idx;
5412 
5413 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
5414 				      &fw_ddb_dma);
5415 	if (fw_ddb_entry == NULL) {
5416 		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
5417 		goto exit_new_nt_list;
5418 	}
5419 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
5420 				     MAX_DEV_DB_ENTRIES;
5421 	fw_idx_size = sizeof(struct qla_ddb_index);
5422 
5423 	for (idx = 0; idx < max_ddbs; idx = next_idx) {
5424 		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
5425 					      NULL, &next_idx, &state,
5426 					      &conn_err, NULL, &conn_id);
5427 		if (ret == QLA_ERROR)
5428 			break;
5429 
5430 		/* Check if NT, then add it to the list */
5431 		if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
5432 			goto continue_next_new_nt;
5433 
5434 		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
5435 			goto continue_next_new_nt;
5436 
5437 		DEBUG2(ql4_printk(KERN_INFO, ha,
5438 				  "Adding  DDB to session = 0x%x\n", idx));
5439 
5440 		nt_ddb_idx = vmalloc(fw_idx_size);
5441 		if (!nt_ddb_idx)
5442 			break;
5443 
5444 		nt_ddb_idx->fw_ddb_idx = idx;
5445 
5446 		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
5447 		if (ret == QLA_SUCCESS) {
5448 			/* free nt_ddb_idx and do not add to list_nt */
5449 			vfree(nt_ddb_idx);
5450 			goto continue_next_new_nt;
5451 		}
5452 
5453 		list_add_tail(&nt_ddb_idx->list, list_nt);
5454 
5455 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
5456 					      idx);
5457 		if (ret == QLA_ERROR)
5458 			goto exit_new_nt_list;
5459 
5460 continue_next_new_nt:
5461 		if (next_idx == 0)
5462 			break;
5463 	}
5464 
5465 exit_new_nt_list:
5466 	if (fw_ddb_entry)
5467 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
5468 }
5469 
5470 /**
5471  * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
5472  * @dev: dev associated with the sysfs entry
5473  * @data: pointer to flashnode session object
5474  *
5475  * Returns:
5476  *	1: if flashnode entry is non-persistent
5477  *	0: if flashnode entry is persistent
5478  **/
5479 static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
5480 {
5481 	struct iscsi_bus_flash_session *fnode_sess;
5482 
5483 	if (!iscsi_flashnode_bus_match(dev, NULL))
5484 		return 0;
5485 
5486 	fnode_sess = iscsi_dev_to_flash_session(dev);
5487 
5488 	return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
5489 }
5490 
5491 /**
5492  * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
5493  * @ha: pointer to host
5494  * @fw_ddb_entry: flash ddb data
5495  * @idx: target index
5496  * @user: if set then this call is made from userland else from kernel
5497  *
5498  * Returns:
5499  * On success: QLA_SUCCESS
5500  * On failure: QLA_ERROR
5501  *
5502  * This creates separate sysfs entries for the session and connection
5503  * attributes of the given fw ddb entry.
5504  * If this is invoked as a result of a userspace call then the entry is
5505  * marked as non-persistent using the flash_state field.
5506  **/
5507 static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
5508 					struct dev_db_entry *fw_ddb_entry,
5509 					uint16_t *idx, int user)
5510 {
5511 	struct iscsi_bus_flash_session *fnode_sess = NULL;
5512 	struct iscsi_bus_flash_conn *fnode_conn = NULL;
5513 	int rc = QLA_ERROR;
5514 
5515 	fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
5516 						 &qla4xxx_iscsi_transport, 0);
5517 	if (!fnode_sess) {
5518 		ql4_printk(KERN_ERR, ha,
5519 			   "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
5520 			   __func__, *idx, ha->host_no);
5521 		goto exit_tgt_create;
5522 	}
5523 
5524 	fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
5525 						 &qla4xxx_iscsi_transport, 0);
5526 	if (!fnode_conn) {
5527 		ql4_printk(KERN_ERR, ha,
5528 			   "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
5529 			   __func__, *idx, ha->host_no);
5530 		goto free_sess;
5531 	}
5532 
5533 	if (user) {
5534 		fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
5535 	} else {
5536 		fnode_sess->flash_state = DEV_DB_PERSISTENT;
5537 
5538 		if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
5539 			fnode_sess->is_boot_target = 1;
5540 		else
5541 			fnode_sess->is_boot_target = 0;
5542 	}
5543 
5544 	rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
5545 					   fw_ddb_entry);
5546 
5547 	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
5548 		   __func__, fnode_sess->dev.kobj.name);
5549 
5550 	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
5551 		   __func__, fnode_conn->dev.kobj.name);
5552 
5553 	return QLA_SUCCESS;
5554 
5555 free_sess:
5556 	iscsi_destroy_flashnode_sess(fnode_sess);
5557 
5558 exit_tgt_create:
5559 	return QLA_ERROR;
5560 }
5561 
5562 /**
5563  * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
5564  * @shost: pointer to host
5565  * @buf: type of ddb entry (ipv4/ipv6)
5566  * @len: length of buf
5567  *
5568  * This creates a new ddb entry in flash by finding the first free index,
5569  * storing a default ddb there, and then creating a sysfs entry for it.
5570  **/
5571 static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
5572 				 int len)
5573 {
5574 	struct scsi_qla_host *ha = to_qla_host(shost);
5575 	struct dev_db_entry *fw_ddb_entry = NULL;
5576 	dma_addr_t fw_ddb_entry_dma;
5577 	struct device *dev;
5578 	uint16_t idx = 0;
5579 	uint16_t max_ddbs = 0;
5580 	uint32_t options = 0;
5581 	uint32_t rval = QLA_ERROR;
5582 
5583 	if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
5584 	    strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
5585 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
5586 				  __func__));
5587 		goto exit_ddb_add;
5588 	}
5589 
5590 	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
5591 				     MAX_DEV_DB_ENTRIES;
5592 
5593 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5594 					  &fw_ddb_entry_dma, GFP_KERNEL);
5595 	if (!fw_ddb_entry) {
5596 		DEBUG2(ql4_printk(KERN_ERR, ha,
5597 				  "%s: Unable to allocate dma buffer\n",
5598 				  __func__));
5599 		goto exit_ddb_add;
5600 	}
5601 
5602 	dev = iscsi_find_flashnode_sess(ha->host, NULL,
5603 					qla4xxx_sysfs_ddb_is_non_persistent);
5604 	if (dev) {
5605 		ql4_printk(KERN_ERR, ha,
5606 			   "%s: A non-persistent entry %s found\n",
5607 			   __func__, dev->kobj.name);
5608 		put_device(dev);
5609 		goto exit_ddb_add;
5610 	}
5611 
5612 	for (idx = 0; idx < max_ddbs; idx++) {
5613 		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
5614 					     fw_ddb_entry_dma, idx))
5615 			break;
5616 	}
5617 
5618 	if (idx == max_ddbs)
5619 		goto exit_ddb_add;
5620 
5621 	if (!strncasecmp("ipv6", buf, 4))
5622 		options |= IPV6_DEFAULT_DDB_ENTRY;
5623 
5624 	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
5625 	if (rval == QLA_ERROR)
5626 		goto exit_ddb_add;
5627 
5628 	rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
5629 
5630 exit_ddb_add:
5631 	if (fw_ddb_entry)
5632 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5633 				  fw_ddb_entry, fw_ddb_entry_dma);
5634 	if (rval == QLA_SUCCESS)
5635 		return idx;
5636 	else
5637 		return -EIO;
5638 }
5639 
5640 /**
5641  * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
5642  * @fnode_sess: pointer to session attrs of flash ddb entry
5643  * @fnode_conn: pointer to connection attrs of flash ddb entry
5644  *
5645  * This writes the contents of target ddb buffer to Flash with a valid cookie
5646  * value in order to make the ddb entry persistent.
5647  **/
5648 static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
5649 				    struct iscsi_bus_flash_conn *fnode_conn)
5650 {
5651 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
5652 	struct scsi_qla_host *ha = to_qla_host(shost);
5653 	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
5654 	struct dev_db_entry *fw_ddb_entry = NULL;
5655 	dma_addr_t fw_ddb_entry_dma;
5656 	uint32_t options = 0;
5657 	int rval = 0;
5658 
5659 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5660 					  &fw_ddb_entry_dma, GFP_KERNEL);
5661 	if (!fw_ddb_entry) {
5662 		DEBUG2(ql4_printk(KERN_ERR, ha,
5663 				  "%s: Unable to allocate dma buffer\n",
5664 				  __func__));
5665 		rval = -ENOMEM;
5666 		goto exit_ddb_apply;
5667 	}
5668 
5669 	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
5670 		options |= IPV6_DEFAULT_DDB_ENTRY;
5671 
5672 	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
5673 	if (rval == QLA_ERROR)
5674 		goto exit_ddb_apply;
5675 
5676 	dev_db_start_offset += (fnode_sess->target_id *
5677 				sizeof(*fw_ddb_entry));
5678 
5679 	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
5680 	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
5681 
5682 	rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
5683 				 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
5684 
5685 	if (rval == QLA_SUCCESS) {
5686 		fnode_sess->flash_state = DEV_DB_PERSISTENT;
5687 		ql4_printk(KERN_INFO, ha,
5688 			   "%s: flash node %u of host %lu written to flash\n",
5689 			   __func__, fnode_sess->target_id, ha->host_no);
5690 	} else {
5691 		rval = -EIO;
5692 		ql4_printk(KERN_ERR, ha,
5693 			   "%s: Error while writing flash node %u of host %lu to flash\n",
5694 			   __func__, fnode_sess->target_id, ha->host_no);
5695 	}
5696 
5697 exit_ddb_apply:
5698 	if (fw_ddb_entry)
5699 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5700 				  fw_ddb_entry, fw_ddb_entry_dma);
5701 	return rval;
5702 }
5703 
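/*
 * Push the ddb entry to firmware, open the connection and wait up to the
 * login timeout for the connection attempt to reach a terminal state.
 */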
5704 static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
5705 					   struct dev_db_entry *fw_ddb_entry,
5706 					   uint16_t idx)
5707 {
5708 	struct dev_db_entry *ddb_entry = NULL;
5709 	dma_addr_t ddb_entry_dma;
5710 	unsigned long wtime;
5711 	uint32_t mbx_sts = 0;
5712 	uint32_t state = 0, conn_err = 0;
5713 	uint16_t tmo = 0;
5714 	int ret = 0;
5715 
5716 	ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
5717 				       &ddb_entry_dma, GFP_KERNEL);
5718 	if (!ddb_entry) {
5719 		DEBUG2(ql4_printk(KERN_ERR, ha,
5720 				  "%s: Unable to allocate dma buffer\n",
5721 				  __func__));
5722 		return QLA_ERROR;
5723 	}
5724 
5725 	memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
5726 
5727 	ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
5728 	if (ret != QLA_SUCCESS) {
5729 		DEBUG2(ql4_printk(KERN_ERR, ha,
5730 				  "%s: Unable to set ddb entry for index %d\n",
5731 				  __func__, idx));
5732 		goto exit_ddb_conn_open;
5733 	}
5734 
5735 	qla4xxx_conn_open(ha, idx);
5736 
5737 	/* To ensure that sendtargets is done, wait for at least 12 secs */
5738 	tmo = ((ha->def_timeout > LOGIN_TOV) &&
5739 	       (ha->def_timeout < LOGIN_TOV * 10) ?
5740 	       ha->def_timeout : LOGIN_TOV);
5741 
5742 	DEBUG2(ql4_printk(KERN_INFO, ha,
5743 			  "Default time to wait for login to ddb %d\n", tmo));
5744 
5745 	wtime = jiffies + (HZ * tmo);
5746 	do {
5747 		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
5748 					      NULL, &state, &conn_err, NULL,
5749 					      NULL);
5750 		if (ret == QLA_ERROR)
5751 			continue;
5752 
5753 		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
5754 		    state == DDB_DS_SESSION_FAILED)
5755 			break;
5756 
5757 		schedule_timeout_uninterruptible(HZ / 10);
5758 	} while (time_after(wtime, jiffies));
5759 
5760 exit_ddb_conn_open:
5761 	if (ddb_entry)
5762 		dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
5763 				  ddb_entry, ddb_entry_dma);
5764 	return ret;
5765 }
5766 
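/*
 * Perform SendTargets discovery: open a temporary ST connection, build
 * sessions for the discovered NT entries, then clear the temporary DDB.
 */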
5767 static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
5768 				struct dev_db_entry *fw_ddb_entry)
5769 {
5770 	struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
5771 	struct list_head list_nt;
5772 	uint16_t ddb_index;
5773 	int ret = 0;
5774 
5775 	if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
5776 		ql4_printk(KERN_WARNING, ha,
5777 			   "%s: A discovery already in progress!\n", __func__);
5778 		return QLA_ERROR;
5779 	}
5780 
5781 	INIT_LIST_HEAD(&list_nt);
5782 
5783 	set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
5784 
5785 	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
5786 	if (ret == QLA_ERROR)
5787 		goto exit_login_st_clr_bit;
5788 
5789 	ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
5790 	if (ret == QLA_ERROR)
5791 		goto exit_login_st;
5792 
5793 	qla4xxx_build_new_nt_list(ha, &list_nt);
5794 
5795 	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
5796 		list_del_init(&ddb_idx->list);
5797 		qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
5798 		vfree(ddb_idx);
5799 	}
5800 
5801 exit_login_st:
5802 	if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
5803 		ql4_printk(KERN_ERR, ha,
5804 			   "Unable to clear DDB index = 0x%x\n", ddb_index);
5805 	}
5806 
5807 	clear_bit(ddb_index, ha->ddb_idx_map);
5808 
5809 exit_login_st_clr_bit:
5810 	clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
5811 	return ret;
5812 }
5813 
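/* Log in to a normal (NT) target unless a matching session already exists. */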
5814 static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
5815 				struct dev_db_entry *fw_ddb_entry,
5816 				uint16_t idx)
5817 {
5818 	int ret = QLA_ERROR;
5819 
5820 	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
5821 	if (ret != QLA_SUCCESS)
5822 		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
5823 					      idx);
5824 	else
5825 		ret = -EPERM;
5826 
5827 	return ret;
5828 }
5829 
5830 /**
5831  * qla4xxx_sysfs_ddb_login - Login to the specified target
5832  * @fnode_sess: pointer to session attrs of flash ddb entry
5833  * @fnode_conn: pointer to connection attrs of flash ddb entry
5834  *
5835  * This logs in to the specified target
5836  **/
5837 static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
5838 				   struct iscsi_bus_flash_conn *fnode_conn)
5839 {
5840 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
5841 	struct scsi_qla_host *ha = to_qla_host(shost);
5842 	struct dev_db_entry *fw_ddb_entry = NULL;
5843 	dma_addr_t fw_ddb_entry_dma;
5844 	uint32_t options = 0;
5845 	int ret = 0;
5846 
5847 	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
5848 		ql4_printk(KERN_ERR, ha,
5849 			   "%s: Target info is not persistent\n", __func__);
5850 		ret = -EIO;
5851 		goto exit_ddb_login;
5852 	}
5853 
5854 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5855 					  &fw_ddb_entry_dma, GFP_KERNEL);
5856 	if (!fw_ddb_entry) {
5857 		DEBUG2(ql4_printk(KERN_ERR, ha,
5858 				  "%s: Unable to allocate dma buffer\n",
5859 				  __func__));
5860 		ret = -ENOMEM;
5861 		goto exit_ddb_login;
5862 	}
5863 
5864 	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
5865 		options |= IPV6_DEFAULT_DDB_ENTRY;
5866 
5867 	ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
5868 	if (ret == QLA_ERROR)
5869 		goto exit_ddb_login;
5870 
5871 	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
5872 	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
5873 
5874 	if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
5875 		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry);
5876 	else
5877 		ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
5878 					   fnode_sess->target_id);
5879 
5880 	if (ret > 0)
5881 		ret = -EIO;
5882 
5883 exit_ddb_login:
5884 	if (fw_ddb_entry)
5885 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5886 				  fw_ddb_entry, fw_ddb_entry_dma);
5887 	return ret;
5888 }
5889 
5890 /**
5891  * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
5892  * @cls_sess: pointer to session to be logged out
5893  *
5894  * This performs session log out from the specified target
5895  **/
5896 static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
5897 {
5898 	struct iscsi_session *sess;
5899 	struct ddb_entry *ddb_entry = NULL;
5900 	struct scsi_qla_host *ha;
5901 	struct dev_db_entry *fw_ddb_entry = NULL;
5902 	dma_addr_t fw_ddb_entry_dma;
5903 	unsigned long flags;
5904 	unsigned long wtime;
5905 	uint32_t ddb_state;
5906 	int options;
5907 	int ret = 0;
5908 
5909 	sess = cls_sess->dd_data;
5910 	ddb_entry = sess->dd_data;
5911 	ha = ddb_entry->ha;
5912 
5913 	if (ddb_entry->ddb_type != FLASH_DDB) {
5914 		ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
5915 			   __func__);
5916 		ret = -ENXIO;
5917 		goto exit_ddb_logout;
5918 	}
5919 
5920 	if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
5921 		ql4_printk(KERN_ERR, ha,
5922 			   "%s: Logout from boot target entry is not permitted.\n",
5923 			   __func__);
5924 		ret = -EPERM;
5925 		goto exit_ddb_logout;
5926 	}
5927 
5928 	options = LOGOUT_OPTION_CLOSE_SESSION;
5929 	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
5930 		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
5931 		ret = -EIO;
5932 		goto exit_ddb_logout;
5933 	}
5934 
5935 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5936 					  &fw_ddb_entry_dma, GFP_KERNEL);
5937 	if (!fw_ddb_entry) {
5938 		ql4_printk(KERN_ERR, ha,
5939 			   "%s: Unable to allocate dma buffer\n", __func__);
5940 		ret = -ENOMEM;
5941 		goto exit_ddb_logout;
5942 	}
5943 
5944 	wtime = jiffies + (HZ * LOGOUT_TOV);
5945 	do {
5946 		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
5947 					      fw_ddb_entry, fw_ddb_entry_dma,
5948 					      NULL, NULL, &ddb_state, NULL,
5949 					      NULL, NULL);
5950 		if (ret == QLA_ERROR)
5951 			goto ddb_logout_clr_sess;
5952 
5953 		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
5954 		    (ddb_state == DDB_DS_SESSION_FAILED))
5955 			goto ddb_logout_clr_sess;
5956 
5957 		schedule_timeout_uninterruptible(HZ);
5958 	} while ((time_after(wtime, jiffies)));
5959 
5960 ddb_logout_clr_sess:
5961 	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
5962 	/*
5963 	 * We dropped the driver module reference when the session was set
5964 	 * up so that driver unload stays seamless without destroying the
5965 	 * session; take the reference back before tearing the session
5966 	 * down.
5967 	 **/
5968 	try_module_get(qla4xxx_iscsi_transport.owner);
5969 	iscsi_destroy_endpoint(ddb_entry->conn->ep);
5970 
5971 	spin_lock_irqsave(&ha->hardware_lock, flags);
5972 	qla4xxx_free_ddb(ha, ddb_entry);
5973 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
5974 
5975 	iscsi_session_teardown(ddb_entry->sess);
5976 
5977 	ret = QLA_SUCCESS;
5978 
5979 exit_ddb_logout:
5980 	if (fw_ddb_entry)
5981 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
5982 				  fw_ddb_entry, fw_ddb_entry_dma);
5983 	return ret;
5984 }
5985 
5986 /**
5987  * qla4xxx_sysfs_ddb_logout - Logout from the specified target
5988  * @fnode_sess: pointer to session attrs of flash ddb entry
5989  * @fnode_conn: pointer to connection attrs of flash ddb entry
5990  *
5991  * This performs log out from the specified target
5992  **/
5993 static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
5994 				    struct iscsi_bus_flash_conn *fnode_conn)
5995 {
5996 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
5997 	struct scsi_qla_host *ha = to_qla_host(shost);
5998 	struct ql4_tuple_ddb *flash_tddb = NULL;
5999 	struct ql4_tuple_ddb *tmp_tddb = NULL;
6000 	struct dev_db_entry *fw_ddb_entry = NULL;
6001 	struct ddb_entry *ddb_entry = NULL;
6002 	dma_addr_t fw_ddb_dma;
6003 	uint32_t next_idx = 0;
6004 	uint32_t state = 0, conn_err = 0;
6005 	uint16_t conn_id = 0;
6006 	int idx, index;
6007 	int status, ret = 0;
6008 
6009 	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
6010 				      &fw_ddb_dma);
6011 	if (fw_ddb_entry == NULL) {
6012 		ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
6013 		ret = -ENOMEM;
6014 		goto exit_ddb_logout;
6015 	}
6016 
6017 	flash_tddb = vzalloc(sizeof(*flash_tddb));
6018 	if (!flash_tddb) {
6019 		ql4_printk(KERN_WARNING, ha,
6020 			   "%s:Memory Allocation failed.\n", __func__);
6021 		ret = -ENOMEM;
6022 		goto exit_ddb_logout;
6023 	}
6024 
6025 	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
6026 	if (!tmp_tddb) {
6027 		ql4_printk(KERN_WARNING, ha,
6028 			   "%s:Memory Allocation failed.\n", __func__);
6029 		ret = -ENOMEM;
6030 		goto exit_ddb_logout;
6031 	}
6032 
6033 	if (!fnode_sess->targetname) {
6034 		ql4_printk(KERN_ERR, ha,
6035 			   "%s:Cannot logout from SendTarget entry\n",
6036 			   __func__);
6037 		ret = -EPERM;
6038 		goto exit_ddb_logout;
6039 	}
6040 
6041 	if (fnode_sess->is_boot_target) {
6042 		ql4_printk(KERN_ERR, ha,
6043 			   "%s: Logout from boot target entry is not permitted.\n",
6044 			   __func__);
6045 		ret = -EPERM;
6046 		goto exit_ddb_logout;
6047 	}
6048 
6049 	strncpy(flash_tddb->iscsi_name, fnode_sess->targetname,
6050 		ISCSI_NAME_SIZE);
6051 
6052 	if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6053 		sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
6054 	else
6055 		sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
6056 
6057 	flash_tddb->tpgt = fnode_sess->tpgt;
6058 	flash_tddb->port = fnode_conn->port;
6059 
6060 	COPY_ISID(flash_tddb->isid, fnode_sess->isid);
6061 
6062 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
6063 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6064 		if (ddb_entry == NULL)
6065 			continue;
6066 
6067 		if (ddb_entry->ddb_type != FLASH_DDB)
6068 			continue;
6069 
6070 		index = ddb_entry->sess->target_id;
6071 		status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
6072 						 fw_ddb_dma, NULL, &next_idx,
6073 						 &state, &conn_err, NULL,
6074 						 &conn_id);
6075 		if (status == QLA_ERROR) {
6076 			ret = -ENOMEM;
6077 			break;
6078 		}
6079 
6080 		qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
6081 
6082 		status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
6083 						   true);
6084 		if (status == QLA_SUCCESS) {
6085 			ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
6086 			break;
6087 		}
6088 	}
6089 
6090 	if (idx == MAX_DDB_ENTRIES)
6091 		ret = -ESRCH;
6092 
6093 exit_ddb_logout:
6094 	if (flash_tddb)
6095 		vfree(flash_tddb);
6096 	if (tmp_tddb)
6097 		vfree(tmp_tddb);
6098 	if (fw_ddb_entry)
6099 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
6100 
6101 	return ret;
6102 }
6103 
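/*
 * Read one flash ddb session/connection parameter into buf for sysfs
 * display; returns the number of bytes written or a negative errno.
 */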
6104 static int
6105 qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
6106 			    int param, char *buf)
6107 {
6108 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6109 	struct scsi_qla_host *ha = to_qla_host(shost);
6110 	struct iscsi_bus_flash_conn *fnode_conn;
6111 	struct ql4_chap_table chap_tbl;
6112 	struct device *dev;
6113 	int parent_type, parent_index = 0xffff;
6114 	int rc = 0;
6115 
6116 	dev = iscsi_find_flashnode_conn(fnode_sess);
6117 	if (!dev)
6118 		return -EIO;
6119 
6120 	fnode_conn = iscsi_dev_to_flash_conn(dev);
6121 
6122 	switch (param) {
6123 	case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6124 		rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
6125 		break;
6126 	case ISCSI_FLASHNODE_PORTAL_TYPE:
6127 		rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
6128 		break;
6129 	case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6130 		rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
6131 		break;
6132 	case ISCSI_FLASHNODE_DISCOVERY_SESS:
6133 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
6134 		break;
6135 	case ISCSI_FLASHNODE_ENTRY_EN:
6136 		rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
6137 		break;
6138 	case ISCSI_FLASHNODE_HDR_DGST_EN:
6139 		rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
6140 		break;
6141 	case ISCSI_FLASHNODE_DATA_DGST_EN:
6142 		rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
6143 		break;
6144 	case ISCSI_FLASHNODE_IMM_DATA_EN:
6145 		rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
6146 		break;
6147 	case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6148 		rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
6149 		break;
6150 	case ISCSI_FLASHNODE_DATASEQ_INORDER:
6151 		rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
6152 		break;
6153 	case ISCSI_FLASHNODE_PDU_INORDER:
6154 		rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
6155 		break;
6156 	case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6157 		rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
6158 		break;
6159 	case ISCSI_FLASHNODE_SNACK_REQ_EN:
6160 		rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
6161 		break;
6162 	case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6163 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
6164 		break;
6165 	case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6166 		rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
6167 		break;
6168 	case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6169 		rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
6170 		break;
6171 	case ISCSI_FLASHNODE_ERL:
6172 		rc = sprintf(buf, "%u\n", fnode_sess->erl);
6173 		break;
6174 	case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6175 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
6176 		break;
6177 	case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6178 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
6179 		break;
6180 	case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6181 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
6182 		break;
6183 	case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6184 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
6185 		break;
6186 	case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6187 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
6188 		break;
6189 	case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6190 		rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
6191 		break;
6192 	case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6193 		rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
6194 		break;
6195 	case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6196 		rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
6197 		break;
6198 	case ISCSI_FLASHNODE_FIRST_BURST:
6199 		rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
6200 		break;
6201 	case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6202 		rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
6203 		break;
6204 	case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6205 		rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
6206 		break;
6207 	case ISCSI_FLASHNODE_MAX_R2T:
6208 		rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
6209 		break;
6210 	case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6211 		rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
6212 		break;
6213 	case ISCSI_FLASHNODE_ISID:
6214 		rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
6215 			     fnode_sess->isid[0], fnode_sess->isid[1],
6216 			     fnode_sess->isid[2], fnode_sess->isid[3],
6217 			     fnode_sess->isid[4], fnode_sess->isid[5]);
6218 		break;
6219 	case ISCSI_FLASHNODE_TSID:
6220 		rc = sprintf(buf, "%u\n", fnode_sess->tsid);
6221 		break;
6222 	case ISCSI_FLASHNODE_PORT:
6223 		rc = sprintf(buf, "%d\n", fnode_conn->port);
6224 		break;
6225 	case ISCSI_FLASHNODE_MAX_BURST:
6226 		rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
6227 		break;
6228 	case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6229 		rc = sprintf(buf, "%u\n",
6230 			     fnode_sess->default_taskmgmt_timeout);
6231 		break;
6232 	case ISCSI_FLASHNODE_IPADDR:
6233 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6234 			rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
6235 		else
6236 			rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
6237 		break;
6238 	case ISCSI_FLASHNODE_ALIAS:
6239 		if (fnode_sess->targetalias)
6240 			rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
6241 		else
6242 			rc = sprintf(buf, "\n");
6243 		break;
6244 	case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6245 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6246 			rc = sprintf(buf, "%pI6\n",
6247 				     fnode_conn->redirect_ipaddr);
6248 		else
6249 			rc = sprintf(buf, "%pI4\n",
6250 				     fnode_conn->redirect_ipaddr);
6251 		break;
6252 	case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6253 		rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
6254 		break;
6255 	case ISCSI_FLASHNODE_LOCAL_PORT:
6256 		rc = sprintf(buf, "%u\n", fnode_conn->local_port);
6257 		break;
6258 	case ISCSI_FLASHNODE_IPV4_TOS:
6259 		rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
6260 		break;
6261 	case ISCSI_FLASHNODE_IPV6_TC:
6262 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6263 			rc = sprintf(buf, "%u\n",
6264 				     fnode_conn->ipv6_traffic_class);
6265 		else
6266 			rc = sprintf(buf, "\n");
6267 		break;
6268 	case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6269 		rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
6270 		break;
6271 	case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6272 		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
6273 			rc = sprintf(buf, "%pI6\n",
6274 				     fnode_conn->link_local_ipv6_addr);
6275 		else
6276 			rc = sprintf(buf, "\n");
6277 		break;
6278 	case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
6279 		if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
6280 			parent_index = fnode_sess->discovery_parent_idx;
6281 
6282 		rc = sprintf(buf, "%u\n", parent_index);
6283 		break;
6284 	case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
6285 		if (fnode_sess->discovery_parent_type == DDB_ISNS)
6286 			parent_type = ISCSI_DISC_PARENT_ISNS;
6287 		else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
6288 			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6289 		else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
6290 			parent_type = ISCSI_DISC_PARENT_SENDTGT;
6291 		else
6292 			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
6293 
6294 		rc = sprintf(buf, "%s\n",
6295 			     iscsi_get_discovery_parent_name(parent_type));
6296 		break;
6297 	case ISCSI_FLASHNODE_NAME:
6298 		if (fnode_sess->targetname)
6299 			rc = sprintf(buf, "%s\n", fnode_sess->targetname);
6300 		else
6301 			rc = sprintf(buf, "\n");
6302 		break;
6303 	case ISCSI_FLASHNODE_TPGT:
6304 		rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
6305 		break;
6306 	case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6307 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
6308 		break;
6309 	case ISCSI_FLASHNODE_TCP_RECV_WSF:
6310 		rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
6311 		break;
6312 	case ISCSI_FLASHNODE_CHAP_OUT_IDX:
6313 		rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
6314 		break;
6315 	case ISCSI_FLASHNODE_USERNAME:
6316 		if (fnode_sess->chap_auth_en) {
6317 			qla4xxx_get_uni_chap_at_index(ha,
6318 						      chap_tbl.name,
6319 						      chap_tbl.secret,
6320 						      fnode_sess->chap_out_idx);
6321 			rc = sprintf(buf, "%s\n", chap_tbl.name);
6322 		} else {
6323 			rc = sprintf(buf, "\n");
6324 		}
6325 		break;
6326 	case ISCSI_FLASHNODE_PASSWORD:
6327 		if (fnode_sess->chap_auth_en) {
6328 			qla4xxx_get_uni_chap_at_index(ha,
6329 						      chap_tbl.name,
6330 						      chap_tbl.secret,
6331 						      fnode_sess->chap_out_idx);
6332 			rc = sprintf(buf, "%s\n", chap_tbl.secret);
6333 		} else {
6334 			rc = sprintf(buf, "\n");
6335 		}
6336 		break;
6337 	case ISCSI_FLASHNODE_STATSN:
6338 		rc = sprintf(buf, "%u\n", fnode_conn->statsn);
6339 		break;
6340 	case ISCSI_FLASHNODE_EXP_STATSN:
6341 		rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
6342 		break;
6343 	case ISCSI_FLASHNODE_IS_BOOT_TGT:
6344 		rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
6345 		break;
6346 	default:
6347 		rc = -ENOSYS;
6348 		break;
6349 	}
6350 
6351 	put_device(dev);
6352 	return rc;
6353 }
6354 
6355 /**
6356  * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
6357  * @fnode_sess: pointer to session attrs of flash ddb entry
6358  * @fnode_conn: pointer to connection attrs of flash ddb entry
6359  * @data: Parameters and their values to update
6360  * @len: len of data
6361  *
6362  * This sets the parameter of flash ddb entry and writes them to flash
6363  **/
6364 static int
6365 qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
6366 			    struct iscsi_bus_flash_conn *fnode_conn,
6367 			    void *data, int len)
6368 {
6369 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6370 	struct scsi_qla_host *ha = to_qla_host(shost);
6371 	struct iscsi_flashnode_param_info *fnode_param;
6372 	struct nlattr *attr;
6373 	int rc = QLA_ERROR;
6374 	uint32_t rem = len;
6375 
6376 	nla_for_each_attr(attr, data, len, rem) {
6377 		fnode_param = nla_data(attr);
6378 
6379 		switch (fnode_param->param) {
6380 		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
6381 			fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
6382 			break;
6383 		case ISCSI_FLASHNODE_PORTAL_TYPE:
6384 			memcpy(fnode_sess->portal_type, fnode_param->value,
6385 			       strlen(fnode_sess->portal_type));
6386 			break;
6387 		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
6388 			fnode_sess->auto_snd_tgt_disable =
6389 							fnode_param->value[0];
6390 			break;
6391 		case ISCSI_FLASHNODE_DISCOVERY_SESS:
6392 			fnode_sess->discovery_sess = fnode_param->value[0];
6393 			break;
6394 		case ISCSI_FLASHNODE_ENTRY_EN:
6395 			fnode_sess->entry_state = fnode_param->value[0];
6396 			break;
6397 		case ISCSI_FLASHNODE_HDR_DGST_EN:
6398 			fnode_conn->hdrdgst_en = fnode_param->value[0];
6399 			break;
6400 		case ISCSI_FLASHNODE_DATA_DGST_EN:
6401 			fnode_conn->datadgst_en = fnode_param->value[0];
6402 			break;
6403 		case ISCSI_FLASHNODE_IMM_DATA_EN:
6404 			fnode_sess->imm_data_en = fnode_param->value[0];
6405 			break;
6406 		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
6407 			fnode_sess->initial_r2t_en = fnode_param->value[0];
6408 			break;
6409 		case ISCSI_FLASHNODE_DATASEQ_INORDER:
6410 			fnode_sess->dataseq_inorder_en = fnode_param->value[0];
6411 			break;
6412 		case ISCSI_FLASHNODE_PDU_INORDER:
6413 			fnode_sess->pdu_inorder_en = fnode_param->value[0];
6414 			break;
6415 		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
6416 			fnode_sess->chap_auth_en = fnode_param->value[0];
6417 			break;
6418 		case ISCSI_FLASHNODE_SNACK_REQ_EN:
6419 			fnode_conn->snack_req_en = fnode_param->value[0];
6420 			break;
6421 		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
6422 			fnode_sess->discovery_logout_en = fnode_param->value[0];
6423 			break;
6424 		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
6425 			fnode_sess->bidi_chap_en = fnode_param->value[0];
6426 			break;
6427 		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
6428 			fnode_sess->discovery_auth_optional =
6429 							fnode_param->value[0];
6430 			break;
6431 		case ISCSI_FLASHNODE_ERL:
6432 			fnode_sess->erl = fnode_param->value[0];
6433 			break;
6434 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
6435 			fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
6436 			break;
6437 		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
6438 			fnode_conn->tcp_nagle_disable = fnode_param->value[0];
6439 			break;
6440 		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
6441 			fnode_conn->tcp_wsf_disable = fnode_param->value[0];
6442 			break;
6443 		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
6444 			fnode_conn->tcp_timer_scale = fnode_param->value[0];
6445 			break;
6446 		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
6447 			fnode_conn->tcp_timestamp_en = fnode_param->value[0];
6448 			break;
6449 		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
6450 			fnode_conn->fragment_disable = fnode_param->value[0];
6451 			break;
6452 		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
6453 			fnode_conn->max_recv_dlength =
6454 					*(unsigned *)fnode_param->value;
6455 			break;
6456 		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
6457 			fnode_conn->max_xmit_dlength =
6458 					*(unsigned *)fnode_param->value;
6459 			break;
6460 		case ISCSI_FLASHNODE_FIRST_BURST:
6461 			fnode_sess->first_burst =
6462 					*(unsigned *)fnode_param->value;
6463 			break;
6464 		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
6465 			fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
6466 			break;
6467 		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
6468 			fnode_sess->time2retain =
6469 						*(uint16_t *)fnode_param->value;
6470 			break;
6471 		case ISCSI_FLASHNODE_MAX_R2T:
6472 			fnode_sess->max_r2t =
6473 					*(uint16_t *)fnode_param->value;
6474 			break;
6475 		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
6476 			fnode_conn->keepalive_timeout =
6477 				*(uint16_t *)fnode_param->value;
6478 			break;
6479 		case ISCSI_FLASHNODE_ISID:
6480 			memcpy(fnode_sess->isid, fnode_param->value,
6481 			       sizeof(fnode_sess->isid));
6482 			break;
6483 		case ISCSI_FLASHNODE_TSID:
6484 			fnode_sess->tsid = *(uint16_t *)fnode_param->value;
6485 			break;
6486 		case ISCSI_FLASHNODE_PORT:
6487 			fnode_conn->port = *(uint16_t *)fnode_param->value;
6488 			break;
6489 		case ISCSI_FLASHNODE_MAX_BURST:
6490 			fnode_sess->max_burst = *(unsigned *)fnode_param->value;
6491 			break;
6492 		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
6493 			fnode_sess->default_taskmgmt_timeout =
6494 						*(uint16_t *)fnode_param->value;
6495 			break;
6496 		case ISCSI_FLASHNODE_IPADDR:
6497 			memcpy(fnode_conn->ipaddress, fnode_param->value,
6498 			       IPv6_ADDR_LEN);
6499 			break;
6500 		case ISCSI_FLASHNODE_ALIAS:
6501 			rc = iscsi_switch_str_param(&fnode_sess->targetalias,
6502 						    (char *)fnode_param->value);
6503 			break;
6504 		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
6505 			memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
6506 			       IPv6_ADDR_LEN);
6507 			break;
6508 		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
6509 			fnode_conn->max_segment_size =
6510 					*(unsigned *)fnode_param->value;
6511 			break;
6512 		case ISCSI_FLASHNODE_LOCAL_PORT:
6513 			fnode_conn->local_port =
6514 						*(uint16_t *)fnode_param->value;
6515 			break;
6516 		case ISCSI_FLASHNODE_IPV4_TOS:
6517 			fnode_conn->ipv4_tos = fnode_param->value[0];
6518 			break;
6519 		case ISCSI_FLASHNODE_IPV6_TC:
6520 			fnode_conn->ipv6_traffic_class = fnode_param->value[0];
6521 			break;
6522 		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
6523 			fnode_conn->ipv6_flow_label = fnode_param->value[0];
6524 			break;
6525 		case ISCSI_FLASHNODE_NAME:
6526 			rc = iscsi_switch_str_param(&fnode_sess->targetname,
6527 						    (char *)fnode_param->value);
6528 			break;
6529 		case ISCSI_FLASHNODE_TPGT:
6530 			fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
6531 			break;
6532 		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
6533 			memcpy(fnode_conn->link_local_ipv6_addr,
6534 			       fnode_param->value, IPv6_ADDR_LEN);
6535 			break;
6536 		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
6537 			fnode_sess->discovery_parent_type =
6538 						*(uint16_t *)fnode_param->value;
6539 			break;
6540 		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
6541 			fnode_conn->tcp_xmit_wsf =
6542 						*(uint8_t *)fnode_param->value;
6543 			break;
6544 		case ISCSI_FLASHNODE_TCP_RECV_WSF:
6545 			fnode_conn->tcp_recv_wsf =
6546 						*(uint8_t *)fnode_param->value;
6547 			break;
6548 		case ISCSI_FLASHNODE_STATSN:
6549 			fnode_conn->statsn = *(uint32_t *)fnode_param->value;
6550 			break;
6551 		case ISCSI_FLASHNODE_EXP_STATSN:
6552 			fnode_conn->exp_statsn =
6553 						*(uint32_t *)fnode_param->value;
6554 			break;
6555 		default:
6556 			ql4_printk(KERN_ERR, ha,
6557 				   "%s: No such sysfs attribute\n", __func__);
6558 			rc = -ENOSYS;
6559 			goto exit_set_param;
6560 		}
6561 	}
6562 
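	/* The updates above are in-memory only; commit them to flash now */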
6563 	rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
6564 
6565 exit_set_param:
6566 	return rc;
6567 }
6568 
6569 /**
6570  * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
6571  * @fnode_sess: pointer to session attrs of flash ddb entry
6572  *
6573  * This invalidates the flash ddb entry at the given index
6574  **/
6575 static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
6576 {
6577 	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
6578 	struct scsi_qla_host *ha = to_qla_host(shost);
6579 	uint32_t dev_db_start_offset;
6580 	uint32_t dev_db_end_offset;
6581 	struct dev_db_entry *fw_ddb_entry = NULL;
6582 	dma_addr_t fw_ddb_entry_dma;
6583 	uint16_t *ddb_cookie = NULL;
6584 	size_t ddb_size = 0;
6585 	void *pddb = NULL;
6586 	int target_id;
6587 	int rc = 0;
6588 
6589 	if (fnode_sess->is_boot_target) {
6590 		rc = -EPERM;
6591 		DEBUG2(ql4_printk(KERN_ERR, ha,
6592 				  "%s: Deletion of boot target entry is not permitted.\n",
6593 				  __func__));
6594 		goto exit_ddb_del;
6595 	}
6596 
6597 	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
6598 		goto sysfs_ddb_del;
6599 
6600 	if (is_qla40XX(ha)) {
6601 		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
6602 		dev_db_end_offset = FLASH_OFFSET_DB_END;
6603 		dev_db_start_offset += (fnode_sess->target_id *
6604 				       sizeof(*fw_ddb_entry));
6605 		ddb_size = sizeof(*fw_ddb_entry);
6606 	} else {
6607 		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
6608 				      (ha->hw.flt_region_ddb << 2);
6609 		/* flt_ddb_size is the DDB table size for both ports,
6610 		 * so divide it by 2 to calculate the offset for the second port
6611 		 */
6612 		if (ha->port_num == 1)
6613 			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
6614 
6615 		dev_db_end_offset = dev_db_start_offset +
6616 				    (ha->hw.flt_ddb_size / 2);
6617 
6618 		dev_db_start_offset += (fnode_sess->target_id *
6619 				       sizeof(*fw_ddb_entry));
6620 		dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
6621 
6622 		ddb_size = sizeof(*ddb_cookie);
6623 	}
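	/*
	 * At this point dev_db_start_offset addresses the full DDB entry on
	 * ISP40xx, but only the 2-byte cookie field of the entry on ISP8xxx,
	 * which is why ddb_size differs between the two branches above.
	 */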
6624 
6625 	DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
6626 			  __func__, dev_db_start_offset, dev_db_end_offset));
6627 
6628 	if (dev_db_start_offset > dev_db_end_offset) {
6629 		rc = -EIO;
6630 		DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
6631 				  __func__, fnode_sess->target_id));
6632 		goto exit_ddb_del;
6633 	}
6634 
6635 	pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
6636 				  &fw_ddb_entry_dma, GFP_KERNEL);
6637 	if (!pddb) {
6638 		rc = -ENOMEM;
6639 		DEBUG2(ql4_printk(KERN_ERR, ha,
6640 				  "%s: Unable to allocate dma buffer\n",
6641 				  __func__));
6642 		goto exit_ddb_del;
6643 	}
6644 
6645 	if (is_qla40XX(ha)) {
6646 		fw_ddb_entry = pddb;
6647 		memset(fw_ddb_entry, 0, ddb_size);
6648 		ddb_cookie = &fw_ddb_entry->cookie;
6649 	} else {
6650 		ddb_cookie = pddb;
6651 	}
6652 
6653 	/* invalidate the cookie */
6654 	*ddb_cookie = 0xFFEE;
6655 	qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
6656 			  ddb_size, FLASH_OPT_RMW_COMMIT);
6657 
6658 sysfs_ddb_del:
6659 	target_id = fnode_sess->target_id;
6660 	iscsi_destroy_flashnode_sess(fnode_sess);
6661 	ql4_printk(KERN_INFO, ha,
6662 		   "%s: session and conn entries for flashnode %u of host %lu deleted\n",
6663 		   __func__, target_id, ha->host_no);
6664 exit_ddb_del:
6665 	if (pddb)
6666 		dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
6667 				  fw_ddb_entry_dma);
6668 	return rc;
6669 }
6670 
6671 /**
6672  * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
6673  * @ha: pointer to adapter structure
6674  *
6675  * Export the firmware DDB for all send targets and normal targets to sysfs.
6676  **/
6677 static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
6678 {
6679 	struct dev_db_entry *fw_ddb_entry = NULL;
6680 	dma_addr_t fw_ddb_entry_dma;
6681 	uint16_t max_ddbs;
6682 	uint16_t idx = 0;
6683 	int ret = QLA_SUCCESS;
6684 
6685 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
6686 					  sizeof(*fw_ddb_entry),
6687 					  &fw_ddb_entry_dma, GFP_KERNEL);
6688 	if (!fw_ddb_entry) {
6689 		DEBUG2(ql4_printk(KERN_ERR, ha,
6690 				  "%s: Unable to allocate dma buffer\n",
6691 				  __func__));
6692 		return -ENOMEM;
6693 	}
6694 
6695 	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
6696 				     MAX_DEV_DB_ENTRIES;
6697 
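	/*
	 * Walk every flash DDB index; entries that cannot be read are
	 * skipped, valid ones get a flashnode session/conn pair in sysfs.
	 */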
6698 	for (idx = 0; idx < max_ddbs; idx++) {
6699 		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
6700 					     idx))
6701 			continue;
6702 
6703 		ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
6704 		if (ret) {
6705 			ret = -EIO;
6706 			break;
6707 		}
6708 	}
6709 
6710 	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
6711 			  fw_ddb_entry_dma);
6712 
6713 	return ret;
6714 }
6715 
6716 static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
6717 {
6718 	iscsi_destroy_all_flashnode(ha->host);
6719 }
6720 
6721 /**
6722  * qla4xxx_build_ddb_list - Build ddb list and setup sessions
6723  * @ha: pointer to adapter structure
6724  * @is_reset: Is this init path or reset path
6725  *
6726  * Create a list of sendtargets (st) from firmware DDBs, issue send targets
6727  * using connection open, then create the list of normal targets (nt)
6728  * from firmware DDBs. Based on the list of nt setup session and connection
6729  * objects.
6730  **/
6731 void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
6732 {
6733 	uint16_t tmo = 0;
6734 	struct list_head list_st, list_nt;
6735 	struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
6736 	unsigned long wtime;
6737 
6738 	if (!test_bit(AF_LINK_UP, &ha->flags)) {
6739 		set_bit(AF_BUILD_DDB_LIST, &ha->flags);
6740 		ha->is_reset = is_reset;
6741 		return;
6742 	}
6743 
6744 	INIT_LIST_HEAD(&list_st);
6745 	INIT_LIST_HEAD(&list_nt);
6746 
6747 	qla4xxx_build_st_list(ha, &list_st);
6748 
6749 	/* Before issuing the conn open mailbox command, ensure all IP states
6750 	 * are configured.  Note: conn open fails if the IPs are not configured.
6751 	 */
6752 	qla4xxx_wait_for_ip_configuration(ha);
6753 
6754 	/* Go thru the STs and fire the sendtargets by issuing conn open mbx */
6755 	list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
6756 		qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
6757 	}
6758 
6759 	/* Wait for all sendtargets to complete (minimum 12 sec wait) */
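	/* Use def_timeout only if it lies between LOGIN_TOV and 10 * LOGIN_TOV */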
6760 	tmo = ((ha->def_timeout > LOGIN_TOV) &&
6761 	       (ha->def_timeout < LOGIN_TOV * 10) ?
6762 	       ha->def_timeout : LOGIN_TOV);
6763 
6764 	DEBUG2(ql4_printk(KERN_INFO, ha,
6765 			  "Default time to wait for build ddb %d\n", tmo));
6766 
6767 	wtime = jiffies + (HZ * tmo);
6768 	do {
6769 		if (list_empty(&list_st))
6770 			break;
6771 
6772 		qla4xxx_remove_failed_ddb(ha, &list_st);
6773 		schedule_timeout_uninterruptible(HZ / 10);
6774 	} while (time_after(wtime, jiffies));
6775 
6776 	/* Free up the sendtargets list */
6777 	qla4xxx_free_ddb_list(&list_st);
6778 
6779 	qla4xxx_build_nt_list(ha, &list_nt, is_reset);
6780 
6781 	qla4xxx_free_ddb_list(&list_nt);
6782 
6783 	qla4xxx_free_ddb_index(ha);
6784 }
6785 
6786 /**
6787  * qla4xxx_wait_login_resp_boot_tgt -  Wait for iSCSI boot target login
6788  * response.
6789  * @ha: pointer to adapter structure
6790  *
6791  * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag is
6792  * set in its DDB and we wait for the login response of the boot targets
6793  * during probe.
6794  **/
6795 static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
6796 {
6797 	struct ddb_entry *ddb_entry;
6798 	struct dev_db_entry *fw_ddb_entry = NULL;
6799 	dma_addr_t fw_ddb_entry_dma;
6800 	unsigned long wtime;
6801 	uint32_t ddb_state;
6802 	int max_ddbs, idx, ret;
6803 
6804 	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
6805 				     MAX_DEV_DB_ENTRIES;
6806 
6807 	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6808 					  &fw_ddb_entry_dma, GFP_KERNEL);
6809 	if (!fw_ddb_entry) {
6810 		ql4_printk(KERN_ERR, ha,
6811 			   "%s: Unable to allocate dma buffer\n", __func__);
6812 		goto exit_login_resp;
6813 	}
6814 
6815 	wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
6816 
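	/*
	 * A single deadline (BOOT_LOGIN_RESP_TOV) bounds the polling below;
	 * each boot-target DDB is polled until it reaches SESSION_ACTIVE or
	 * SESSION_FAILED, or the deadline expires.
	 */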
6817 	for (idx = 0; idx < max_ddbs; idx++) {
6818 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
6819 		if (ddb_entry == NULL)
6820 			continue;
6821 
6822 		if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
6823 			DEBUG2(ql4_printk(KERN_INFO, ha,
6824 					  "%s: DDB index [%d]\n", __func__,
6825 					  ddb_entry->fw_ddb_index));
6826 			do {
6827 				ret = qla4xxx_get_fwddb_entry(ha,
6828 						ddb_entry->fw_ddb_index,
6829 						fw_ddb_entry, fw_ddb_entry_dma,
6830 						NULL, NULL, &ddb_state, NULL,
6831 						NULL, NULL);
6832 				if (ret == QLA_ERROR)
6833 					goto exit_login_resp;
6834 
6835 				if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
6836 				    (ddb_state == DDB_DS_SESSION_FAILED))
6837 					break;
6838 
6839 				schedule_timeout_uninterruptible(HZ);
6840 
6841 			} while ((time_after(wtime, jiffies)));
6842 
6843 			if (!time_after(wtime, jiffies)) {
6844 				DEBUG2(ql4_printk(KERN_INFO, ha,
6845 						  "%s: Login response wait timer expired\n",
6846 						  __func__));
6847 				 goto exit_login_resp;
6848 			}
6849 		}
6850 	}
6851 
6852 exit_login_resp:
6853 	if (fw_ddb_entry)
6854 		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
6855 				  fw_ddb_entry, fw_ddb_entry_dma);
6856 }
6857 
6858 /**
6859  * qla4xxx_probe_adapter - callback function to probe HBA
6860  * @pdev: pointer to pci_dev structure
6861  * @ent: pointer to the pci_device_id entry
6862  *
6863  * This routine will probe for QLogic 4xxx iSCSI host adapters.
6864  * It returns zero if successful. It also initializes all data necessary for
6865  * the driver.
6866  **/
6867 static int qla4xxx_probe_adapter(struct pci_dev *pdev,
6868 				 const struct pci_device_id *ent)
6869 {
6870 	int ret = -ENODEV, status;
6871 	struct Scsi_Host *host;
6872 	struct scsi_qla_host *ha;
6873 	uint8_t init_retry_count = 0;
6874 	char buf[34];
6875 	struct qla4_8xxx_legacy_intr_set *nx_legacy_intr;
6876 	uint32_t dev_state;
6877 
6878 	if (pci_enable_device(pdev))
6879 		return -1;
6880 
6881 	host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
6882 	if (host == NULL) {
6883 		printk(KERN_WARNING
6884 		       "qla4xxx: Couldn't allocate host from scsi layer!\n");
6885 		goto probe_disable_device;
6886 	}
6887 
6888 	/* Clear our data area */
6889 	ha = to_qla_host(host);
6890 	memset(ha, 0, sizeof(*ha));
6891 
6892 	/* Save the information from PCI BIOS.	*/
6893 	ha->pdev = pdev;
6894 	ha->host = host;
6895 	ha->host_no = host->host_no;
6896 	ha->func_num = PCI_FUNC(ha->pdev->devfn);
6897 
6898 	pci_enable_pcie_error_reporting(pdev);
6899 
6900 	/* Setup Runtime configurable options */
6901 	if (is_qla8022(ha)) {
6902 		ha->isp_ops = &qla4_82xx_isp_ops;
6903 		ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
6904 		ha->qdr_sn_window = -1;
6905 		ha->ddr_mn_window = -1;
6906 		ha->curr_window = 255;
6907 		nx_legacy_intr = &legacy_intr[ha->func_num];
6908 		ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
6909 		ha->nx_legacy_intr.tgt_status_reg =
6910 			nx_legacy_intr->tgt_status_reg;
6911 		ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
6912 		ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
6913 	} else if (is_qla8032(ha)) {
6914 		ha->isp_ops = &qla4_83xx_isp_ops;
6915 		ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
6916 	} else {
6917 		ha->isp_ops = &qla4xxx_isp_ops;
6918 	}
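	/*
	 * isp_ops now provides the chip-specific register, interrupt and
	 * reset handlers used throughout the rest of probe.
	 */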
6919 
6920 	if (is_qla80XX(ha)) {
6921 		rwlock_init(&ha->hw_lock);
6922 		ha->pf_bit = ha->func_num << 16;
6923 		/* Set EEH reset type to fundamental if required by hba */
6924 		pdev->needs_freset = 1;
6925 	}
6926 
6927 	/* Configure PCI I/O space. */
6928 	ret = ha->isp_ops->iospace_config(ha);
6929 	if (ret)
6930 		goto probe_failed_ioconfig;
6931 
6932 	ql4_printk(KERN_INFO, ha, "Found an ISP%04x, irq %d, iobase 0x%p\n",
6933 		   pdev->device, pdev->irq, ha->reg);
6934 
6935 	qla4xxx_config_dma_addressing(ha);
6936 
6937 	/* Initialize lists and spinlocks. */
6938 	INIT_LIST_HEAD(&ha->free_srb_q);
6939 
6940 	mutex_init(&ha->mbox_sem);
6941 	mutex_init(&ha->chap_sem);
6942 	init_completion(&ha->mbx_intr_comp);
6943 	init_completion(&ha->disable_acb_comp);
6944 
6945 	spin_lock_init(&ha->hardware_lock);
6946 	spin_lock_init(&ha->work_lock);
6947 
6948 	/* Initialize work list */
6949 	INIT_LIST_HEAD(&ha->work_list);
6950 
6951 	/* Allocate dma buffers */
6952 	if (qla4xxx_mem_alloc(ha)) {
6953 		ql4_printk(KERN_WARNING, ha,
6954 		    "[ERROR] Failed to allocate memory for adapter\n");
6955 
6956 		ret = -ENOMEM;
6957 		goto probe_failed;
6958 	}
6959 
6960 	host->cmd_per_lun = 3;
6961 	host->max_channel = 0;
6962 	host->max_lun = MAX_LUNS - 1;
6963 	host->max_id = MAX_TARGETS;
6964 	host->max_cmd_len = IOCB_MAX_CDB_LEN;
6965 	host->can_queue = MAX_SRBS;
6966 	host->transportt = qla4xxx_scsi_transport;
6967 
6968 	ret = scsi_init_shared_tag_map(host, MAX_SRBS);
6969 	if (ret) {
6970 		ql4_printk(KERN_WARNING, ha,
6971 			   "%s: scsi_init_shared_tag_map failed\n", __func__);
6972 		goto probe_failed;
6973 	}
6974 
6975 	pci_set_drvdata(pdev, ha);
6976 
6977 	ret = scsi_add_host(host, &pdev->dev);
6978 	if (ret)
6979 		goto probe_failed;
6980 
6981 	if (is_qla80XX(ha))
6982 		qla4_8xxx_get_flash_info(ha);
6983 
6984 	if (is_qla8032(ha)) {
6985 		qla4_83xx_read_reset_template(ha);
6986 		/*
6987 		 * NOTE: If ql4xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
6988 		 * If DONTRESET_BIT0 is set, drivers should not set dev_state
6989 		 * to NEED_RESET; but if NEED_RESET is already set, drivers
6990 		 * should honor the reset.
6991 		 */
6992 		if (ql4xdontresethba == 1)
6993 			qla4_83xx_set_idc_dontreset(ha);
6994 	}
6995 
6996 	/*
6997 	 * Initialize the Host adapter request/response queues and
6998 	 * firmware
6999 	 * NOTE: interrupts enabled upon successful completion
7000 	 */
7001 	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7002 
7003 	/* Don't retry adapter initialization if IRQ allocation failed */
7004 	if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
7005 		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
7006 			   __func__);
7007 		goto skip_retry_init;
7008 	}
7009 
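	/*
	 * Retry initialization up to MAX_INIT_RETRIES times, resetting the
	 * chip between attempts, unless the hardware reports a failed state.
	 */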
7010 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
7011 	    init_retry_count++ < MAX_INIT_RETRIES) {
7012 
7013 		if (is_qla80XX(ha)) {
7014 			ha->isp_ops->idc_lock(ha);
7015 			dev_state = qla4_8xxx_rd_direct(ha,
7016 							QLA8XXX_CRB_DEV_STATE);
7017 			ha->isp_ops->idc_unlock(ha);
7018 			if (dev_state == QLA8XXX_DEV_FAILED) {
7019 				ql4_printk(KERN_WARNING, ha, "%s: don't retry "
7020 				    "initialize adapter. H/W is in failed state\n",
7021 				    __func__);
7022 				break;
7023 			}
7024 		}
7025 		DEBUG2(printk("scsi: %s: retrying adapter initialization "
7026 			      "(%d)\n", __func__, init_retry_count));
7027 
7028 		if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
7029 			continue;
7030 
7031 		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
7032 	}
7033 
7034 skip_retry_init:
7035 	if (!test_bit(AF_ONLINE, &ha->flags)) {
7036 		ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
7037 
7038 		if ((is_qla8022(ha) && ql4xdontresethba) ||
7039 		    (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
7040 			/* Put the device in failed state. */
7041 			DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
7042 			ha->isp_ops->idc_lock(ha);
7043 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
7044 					    QLA8XXX_DEV_FAILED);
7045 			ha->isp_ops->idc_unlock(ha);
7046 		}
7047 		ret = -ENODEV;
7048 		goto remove_host;
7049 	}
7050 
7051 	/* Startup the kernel thread for this host adapter. */
7052 	DEBUG2(printk("scsi: %s: Starting kernel thread for "
7053 		      "qla4xxx_dpc\n", __func__));
7054 	sprintf(buf, "qla4xxx_%lu_dpc", ha->host_no);
7055 	ha->dpc_thread = create_singlethread_workqueue(buf);
7056 	if (!ha->dpc_thread) {
7057 		ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
7058 		ret = -ENODEV;
7059 		goto remove_host;
7060 	}
7061 	INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
7062 
7063 	sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
7064 	ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
7065 	if (!ha->task_wq) {
7066 		ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
7067 		ret = -ENODEV;
7068 		goto remove_host;
7069 	}
7070 
7071 	/*
7072 	 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
7073 	 * (which is called indirectly by qla4xxx_initialize_adapter),
7074 	 * so that irqs will be registered after crbinit but before
7075 	 * mbx_intr_enable.
7076 	 */
7077 	if (is_qla40XX(ha)) {
7078 		ret = qla4xxx_request_irqs(ha);
7079 		if (ret) {
7080 			ql4_printk(KERN_WARNING, ha, "Failed to reserve "
7081 			    "interrupt %d already in use.\n", pdev->irq);
7082 			goto remove_host;
7083 		}
7084 	}
7085 
7086 	pci_save_state(ha->pdev);
7087 	ha->isp_ops->enable_intrs(ha);
7088 
7089 	/* Start timer thread. */
7090 	qla4xxx_start_timer(ha, qla4xxx_timer, 1);
7091 
7092 	set_bit(AF_INIT_DONE, &ha->flags);
7093 
7094 	qla4_8xxx_alloc_sysfs_attr(ha);
7095 
7096 	printk(KERN_INFO
7097 	       " QLogic iSCSI HBA Driver version: %s\n"
7098 	       "  QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
7099 	       qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
7100 	       ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
7101 	       ha->patch_number, ha->build_number);
7102 
7103 	/* Set the driver version */
7104 	if (is_qla80XX(ha))
7105 		qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
7106 
7107 	if (qla4xxx_setup_boot_info(ha))
7108 		ql4_printk(KERN_ERR, ha,
7109 			   "%s: No iSCSI boot target configured\n", __func__);
7110 
7111 	if (qla4xxx_sysfs_ddb_export(ha))
7112 		ql4_printk(KERN_ERR, ha,
7113 			   "%s: Error exporting ddb to sysfs\n", __func__);
7114 
7115 	/* Build the ddb list and log in to each entry */
7116 	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
7117 	iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
7118 	qla4xxx_wait_login_resp_boot_tgt(ha);
7119 
7120 	qla4xxx_create_chap_list(ha);
7121 
7122 	qla4xxx_create_ifaces(ha);
7123 	return 0;
7124 
7125 remove_host:
7126 	scsi_remove_host(ha->host);
7127 
7128 probe_failed:
7129 	qla4xxx_free_adapter(ha);
7130 
7131 probe_failed_ioconfig:
7132 	pci_disable_pcie_error_reporting(pdev);
7133 	scsi_host_put(ha->host);
7134 
7135 probe_disable_device:
7136 	pci_disable_device(pdev);
7137 
7138 	return ret;
7139 }
7140 
7141 /**
7142  * qla4xxx_prevent_other_port_reinit - prevent other port from re-initialize
7143  * @ha: pointer to adapter structure
7144  *
7145  * Mark the other ISP-4xxx port to indicate that the driver is being removed,
7146  * so that the other port will not re-initialize while in the process of
7147  * removing the ha due to driver unload or hba hotplug.
7148  **/
7149 static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
7150 {
7151 	struct scsi_qla_host *other_ha = NULL;
7152 	struct pci_dev *other_pdev = NULL;
7153 	int fn = ISP4XXX_PCI_FN_2;
7154 
7155 	/* iSCSI function numbers for ISP4xxx are 1 and 3; pick the other port's function */
7156 	if (PCI_FUNC(ha->pdev->devfn) & BIT_1)
7157 		fn = ISP4XXX_PCI_FN_1;
7158 
7159 	other_pdev =
7160 		pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
7161 		ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
7162 		fn));
7163 
7164 	/* Get other_ha if other_pdev is valid and state is enable*/
7165 	if (other_pdev) {
7166 		if (atomic_read(&other_pdev->enable_cnt)) {
7167 			other_ha = pci_get_drvdata(other_pdev);
7168 			if (other_ha) {
7169 				set_bit(AF_HA_REMOVAL, &other_ha->flags);
7170 				DEBUG2(ql4_printk(KERN_INFO, ha, "%s: "
7171 				    "Prevent %s reinit\n", __func__,
7172 				    dev_name(&other_ha->pdev->dev)));
7173 			}
7174 		}
7175 		pci_dev_put(other_pdev);
7176 	}
7177 }
7178 
7179 static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
7180 {
7181 	struct ddb_entry *ddb_entry;
7182 	int options;
7183 	int idx;
7184 
7185 	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
7186 
7187 		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
7188 		if ((ddb_entry != NULL) &&
7189 		    (ddb_entry->ddb_type == FLASH_DDB)) {
7190 
7191 			options = LOGOUT_OPTION_CLOSE_SESSION;
7192 			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
7193 			    == QLA_ERROR)
7194 				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
7195 					   __func__);
7196 
7197 			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
7198 			/*
7199 			 * We dropped the driver's module reference when the
7200 			 * session was set up so that driver unload is seamless
7201 			 * without actually destroying the session; take the
7202 			 * reference back before tearing the session down.
7203 			 */
7204 			try_module_get(qla4xxx_iscsi_transport.owner);
7205 			iscsi_destroy_endpoint(ddb_entry->conn->ep);
7206 			qla4xxx_free_ddb(ha, ddb_entry);
7207 			iscsi_session_teardown(ddb_entry->sess);
7208 		}
7209 	}
7210 }
7211 /**
7212  * qla4xxx_remove_adapter - callback function to remove adapter.
7213  * @pdev: PCI device pointer
7214  **/
7215 static void qla4xxx_remove_adapter(struct pci_dev *pdev)
7216 {
7217 	struct scsi_qla_host *ha;
7218 
7219 	/*
7220 	 * If the PCI device is disabled then it means probe_adapter had
7221 	 * failed and resources already cleaned up on probe_adapter exit.
7222 	 */
7223 	if (!pci_is_enabled(pdev))
7224 		return;
7225 
7226 	ha = pci_get_drvdata(pdev);
7227 
7228 	if (is_qla40XX(ha))
7229 		qla4xxx_prevent_other_port_reinit(ha);
7230 
7231 	/* destroy iface from sysfs */
7232 	qla4xxx_destroy_ifaces(ha);
7233 
7234 	if ((!ql4xdisablesysfsboot) && ha->boot_kset)
7235 		iscsi_boot_destroy_kset(ha->boot_kset);
7236 
7237 	qla4xxx_destroy_fw_ddb_session(ha);
7238 	qla4_8xxx_free_sysfs_attr(ha);
7239 
7240 	qla4xxx_sysfs_ddb_remove(ha);
7241 	scsi_remove_host(ha->host);
7242 
7243 	qla4xxx_free_adapter(ha);
7244 
7245 	scsi_host_put(ha->host);
7246 
7247 	pci_disable_pcie_error_reporting(pdev);
7248 	pci_disable_device(pdev);
7249 	pci_set_drvdata(pdev, NULL);
7250 }
7251 
7252 /**
7253  * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
7254  * @ha: HA context
7255  *
7256  * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
7257  * supported addressing method.
7258  */
7259 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
7260 {
7261 	int retval;
7262 
7263 	/* Update our PCI device dma_mask for full 64 bit mask */
7264 	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
7265 		if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
7266 			dev_dbg(&ha->pdev->dev,
7267 				  "Failed to set 64 bit PCI consistent mask; "
7268 				   "using 32 bit.\n");
7269 			retval = pci_set_consistent_dma_mask(ha->pdev,
7270 							     DMA_BIT_MASK(32));
7271 		}
7272 	} else
7273 		retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
7274 }
7275 
7276 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
7277 {
7278 	struct iscsi_cls_session *cls_sess;
7279 	struct iscsi_session *sess;
7280 	struct ddb_entry *ddb;
7281 	int queue_depth = QL4_DEF_QDEPTH;
7282 
7283 	cls_sess = starget_to_session(sdev->sdev_target);
7284 	sess = cls_sess->dd_data;
7285 	ddb = sess->dd_data;
7286 
7287 	sdev->hostdata = ddb;
7288 	sdev->tagged_supported = 1;
7289 
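	/* Honor ql4xmaxqdepth only when it is a sane, non-zero 16-bit value */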
7290 	if (ql4xmaxqdepth != 0 && ql4xmaxqdepth <= 0xffffU)
7291 		queue_depth = ql4xmaxqdepth;
7292 
7293 	scsi_activate_tcq(sdev, queue_depth);
7294 	return 0;
7295 }
7296 
7297 static int qla4xxx_slave_configure(struct scsi_device *sdev)
7298 {
7299 	sdev->tagged_supported = 1;
7300 	return 0;
7301 }
7302 
7303 static void qla4xxx_slave_destroy(struct scsi_device *sdev)
7304 {
7305 	scsi_deactivate_tcq(sdev, 1);
7306 }
7307 
7308 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
7309 				      int reason)
7310 {
7311 	if (!ql4xqfulltracking)
7312 		return -EOPNOTSUPP;
7313 
7314 	return iscsi_change_queue_depth(sdev, qdepth, reason);
7315 }
7316 
7317 /**
7318  * qla4xxx_del_from_active_array - returns an active srb
7319  * @ha: Pointer to host adapter structure.
7320  * @index: index into the active_array
7321  *
7322  * This routine removes and returns the srb at the specified index
7323  **/
7324 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
7325     uint32_t index)
7326 {
7327 	struct srb *srb = NULL;
7328 	struct scsi_cmnd *cmd = NULL;
7329 
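	/*
	 * The index is the block layer tag: map it back to the scsi_cmnd and
	 * recover the driver srb stashed in CMD_SP.
	 */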
7330 	cmd = scsi_host_find_tag(ha->host, index);
7331 	if (!cmd)
7332 		return srb;
7333 
7334 	srb = (struct srb *)CMD_SP(cmd);
7335 	if (!srb)
7336 		return srb;
7337 
7338 	/* update counters */
7339 	if (srb->flags & SRB_DMA_VALID) {
7340 		ha->iocb_cnt -= srb->iocb_cnt;
7341 		if (srb->cmd)
7342 			srb->cmd->host_scribble =
7343 				(unsigned char *)(unsigned long) MAX_SRBS;
7344 	}
7345 	return srb;
7346 }
7347 
7348 /**
7349  * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
7350  * @ha: Pointer to host adapter structure.
7351  * @cmd: Scsi Command to wait on.
7352  *
7353  * This routine waits for the command to be returned by the Firmware
7354  * for some max time.
7355  **/
7356 static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
7357 				      struct scsi_cmnd *cmd)
7358 {
7359 	int done = 0;
7360 	struct srb *rp;
7361 	uint32_t max_wait_time = EH_WAIT_CMD_TOV;
7362 	int ret = SUCCESS;
7363 
7364 	/* Don't wait on the command if a PCI error is being handled
7365 	 * by the PCI AER driver
7366 	 */
7367 	if (unlikely(pci_channel_offline(ha->pdev)) ||
7368 	    (test_bit(AF_EEH_BUSY, &ha->flags))) {
7369 		ql4_printk(KERN_WARNING, ha, "scsi%ld: Return from %s\n",
7370 		    ha->host_no, __func__);
7371 		return ret;
7372 	}
7373 
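	/* Poll every 2 seconds; CMD_SP is cleared once the command returns */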
7374 	do {
7375 		/* Check whether it has been returned to the OS */
7376 		rp = (struct srb *) CMD_SP(cmd);
7377 		if (rp == NULL) {
7378 			done++;
7379 			break;
7380 		}
7381 
7382 		msleep(2000);
7383 	} while (max_wait_time--);
7384 
7385 	return done;
7386 }
7387 
7388 /**
7389  * qla4xxx_wait_for_hba_online - waits for HBA to come online
7390  * @ha: Pointer to host adapter structure
7391  **/
7392 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha)
7393 {
7394 	unsigned long wait_online;
7395 
7396 	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
7397 	while (time_before(jiffies, wait_online)) {
7398 
7399 		if (adapter_up(ha))
7400 			return QLA_SUCCESS;
7401 
7402 		msleep(2000);
7403 	}
7404 
7405 	return QLA_ERROR;
7406 }
7407 
7408 /**
7409  * qla4xxx_eh_wait_for_commands - wait for active cmds to finish.
7410  * @ha: pointer to HBA
7411  * @stgt: pointer to the SCSI target
7412  * @sdev: pointer to the SCSI device (lun); NULL waits on the whole target
7413  *
7414  * This function waits for all outstanding commands to the target or lun to
7415  * complete. It returns 0 if all pending commands are returned and 1 otherwise.
7416  **/
7417 static int qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha,
7418 					struct scsi_target *stgt,
7419 					struct scsi_device *sdev)
7420 {
7421 	int cnt;
7422 	int status = 0;
7423 	struct scsi_cmnd *cmd;
7424 
7425 	/*
7426 	 * Waiting for all commands for the designated target or dev
7427 	 * in the active array
7428 	 */
7429 	for (cnt = 0; cnt < ha->host->can_queue; cnt++) {
7430 		cmd = scsi_host_find_tag(ha->host, cnt);
7431 		if (cmd && stgt == scsi_target(cmd->device) &&
7432 		    (!sdev || sdev == cmd->device)) {
7433 			if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
7434 				status++;
7435 				break;
7436 			}
7437 		}
7438 	}
7439 	return status;
7440 }
7441 
7442 /**
7443  * qla4xxx_eh_abort - callback for abort task.
7444  * @cmd: Pointer to Linux's SCSI command structure
7445  *
7446  * This routine is called by the Linux OS to abort the specified
7447  * command.
7448  **/
7449 static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
7450 {
7451 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7452 	unsigned int id = cmd->device->id;
7453 	unsigned int lun = cmd->device->lun;
7454 	unsigned long flags;
7455 	struct srb *srb = NULL;
7456 	int ret = SUCCESS;
7457 	int wait = 0;
7458 
7459 	ql4_printk(KERN_INFO, ha,
7460 	    "scsi%ld:%d:%d: Abort command issued cmd=%p\n",
7461 	    ha->host_no, id, lun, cmd);
7462 
7463 	spin_lock_irqsave(&ha->hardware_lock, flags);
7464 	srb = (struct srb *) CMD_SP(cmd);
7465 	if (!srb) {
7466 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
7467 		return SUCCESS;
7468 	}
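	/* Hold an srb reference so it cannot be freed while the abort runs */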
7469 	kref_get(&srb->srb_ref);
7470 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
7471 
7472 	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
7473 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
7474 		    ha->host_no, id, lun));
7475 		ret = FAILED;
7476 	} else {
7477 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
7478 		    ha->host_no, id, lun));
7479 		wait = 1;
7480 	}
7481 
7482 	kref_put(&srb->srb_ref, qla4xxx_srb_compl);
7483 
7484 	/* Wait for command to complete */
7485 	if (wait) {
7486 		if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
7487 			DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
7488 			    ha->host_no, id, lun));
7489 			ret = FAILED;
7490 		}
7491 	}
7492 
7493 	ql4_printk(KERN_INFO, ha,
7494 	    "scsi%ld:%d:%d: Abort command - %s\n",
7495 	    ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
7496 
7497 	return ret;
7498 }
7499 
7500 /**
7501  * qla4xxx_eh_device_reset - callback for target reset.
7502  * @cmd: Pointer to Linux's SCSI command structure
7503  *
7504  * This routine is called by the Linux OS to reset all luns on the
7505  * specified target.
7506  **/
7507 static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
7508 {
7509 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7510 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
7511 	int ret = FAILED, stat;
7512 
7513 	if (!ddb_entry)
7514 		return ret;
7515 
7516 	ret = iscsi_block_scsi_eh(cmd);
7517 	if (ret)
7518 		return ret;
7519 	ret = FAILED;
7520 
7521 	ql4_printk(KERN_INFO, ha,
7522 		   "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
7523 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
7524 
7525 	DEBUG2(printk(KERN_INFO
7526 		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
7527 		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
7528 		      cmd, jiffies, cmd->request->timeout / HZ,
7529 		      ha->dpc_flags, cmd->result, cmd->allowed));
7530 
7531 	/* FIXME: wait for hba to go online */
7532 	stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
7533 	if (stat != QLA_SUCCESS) {
7534 		ql4_printk(KERN_INFO, ha, "DEVICE RESET FAILED. %d\n", stat);
7535 		goto eh_dev_reset_done;
7536 	}
7537 
7538 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
7539 					 cmd->device)) {
7540 		ql4_printk(KERN_INFO, ha,
7541 			   "DEVICE RESET FAILED - waiting for "
7542 			   "commands.\n");
7543 		goto eh_dev_reset_done;
7544 	}
7545 
7546 	/* Send a marker IOCB to notify the firmware of the LUN reset. */
7547 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
7548 		MM_LUN_RESET) != QLA_SUCCESS)
7549 		goto eh_dev_reset_done;
7550 
7551 	ql4_printk(KERN_INFO, ha,
7552 		   "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
7553 		   ha->host_no, cmd->device->channel, cmd->device->id,
7554 		   cmd->device->lun);
7555 
7556 	ret = SUCCESS;
7557 
7558 eh_dev_reset_done:
7559 
7560 	return ret;
7561 }
7562 
7563 /**
7564  * qla4xxx_eh_target_reset - callback for target reset.
7565  * @cmd: Pointer to Linux's SCSI command structure
7566  *
7567  * This routine is called by the Linux OS to reset the target.
7568  **/
7569 static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
7570 {
7571 	struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
7572 	struct ddb_entry *ddb_entry = cmd->device->hostdata;
7573 	int stat, ret;
7574 
7575 	if (!ddb_entry)
7576 		return FAILED;
7577 
7578 	ret = iscsi_block_scsi_eh(cmd);
7579 	if (ret)
7580 		return ret;
7581 
7582 	starget_printk(KERN_INFO, scsi_target(cmd->device),
7583 		       "WARM TARGET RESET ISSUED.\n");
7584 
7585 	DEBUG2(printk(KERN_INFO
7586 		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
7587 		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
7588 		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
7589 		      ha->dpc_flags, cmd->result, cmd->allowed));
7590 
7591 	stat = qla4xxx_reset_target(ha, ddb_entry);
7592 	if (stat != QLA_SUCCESS) {
7593 		starget_printk(KERN_INFO, scsi_target(cmd->device),
7594 			       "WARM TARGET RESET FAILED.\n");
7595 		return FAILED;
7596 	}
7597 
7598 	if (qla4xxx_eh_wait_for_commands(ha, scsi_target(cmd->device),
7599 					 NULL)) {
7600 		starget_printk(KERN_INFO, scsi_target(cmd->device),
7601 			       "WARM TARGET DEVICE RESET FAILED - "
7602 			       "waiting for commands.\n");
7603 		return FAILED;
7604 	}
7605 
7606 	/* Send a marker IOCB to notify the firmware of the target reset. */
7607 	if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
7608 		MM_TGT_WARM_RESET) != QLA_SUCCESS) {
7609 		starget_printk(KERN_INFO, scsi_target(cmd->device),
7610 			       "WARM TARGET DEVICE RESET FAILED - "
7611 			       "marker iocb failed.\n");
7612 		return FAILED;
7613 	}
7614 
7615 	starget_printk(KERN_INFO, scsi_target(cmd->device),
7616 		       "WARM TARGET RESET SUCCEEDED.\n");
7617 	return SUCCESS;
7618 }
7619 
7620 /**
7621  * qla4xxx_is_eh_active - check if error handler is running
7622  * @shost: Pointer to SCSI Host struct
7623  *
7624  * This routine determines whether the host reset was invoked from the EH
7625  * path or from an application such as sg_reset
7626  **/
7627 static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
7628 {
7629 	if (shost->shost_state == SHOST_RECOVERY)
7630 		return 1;
7631 	return 0;
7632 }
7633 
7634 /**
7635  * qla4xxx_eh_host_reset - kernel callback
7636  * @cmd: Pointer to Linux's SCSI command structure
7637  *
7638  * This routine is invoked by the Linux kernel to perform fatal error
7639  * recovery on the specified adapter.
7640  **/
7641 static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
7642 {
7643 	int return_status = FAILED;
7644 	struct scsi_qla_host *ha;
7645 
7646 	ha = to_qla_host(cmd->device->host);
7647 
7648 	if (is_qla8032(ha) && ql4xdontresethba)
7649 		qla4_83xx_set_idc_dontreset(ha);
7650 
7651 	/*
7652 	 * For ISP8324, if IDC_CTRL DONTRESET_BIT0 is set by other
7653 	 * protocol drivers, we should not set device_state to
7654 	 * NEED_RESET
7655 	 */
7656 	if (ql4xdontresethba ||
7657 	    (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
7658 		DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
7659 		     ha->host_no, __func__));
7660 
7661 		/* Clear outstanding srb in queues */
7662 		if (qla4xxx_is_eh_active(cmd->device->host))
7663 			qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
7664 
7665 		return FAILED;
7666 	}
7667 
7668 	ql4_printk(KERN_INFO, ha,
7669 		   "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
7670 		   cmd->device->channel, cmd->device->id, cmd->device->lun);
7671 
7672 	if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
7673 		DEBUG2(printk("scsi%ld:%d: %s: Unable to reset host.  Adapter "
7674 			      "DEAD.\n", ha->host_no, cmd->device->channel,
7675 			      __func__));
7676 
7677 		return FAILED;
7678 	}
7679 
7680 	if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
7681 		if (is_qla80XX(ha))
7682 			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
7683 		else
7684 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
7685 	}
7686 
7687 	if (qla4xxx_recover_adapter(ha) == QLA_SUCCESS)
7688 		return_status = SUCCESS;
7689 
7690 	ql4_printk(KERN_INFO, ha, "HOST RESET %s.\n",
7691 		   return_status == FAILED ? "FAILED" : "SUCCEEDED");
7692 
7693 	return return_status;
7694 }
7695 
7696 static int qla4xxx_context_reset(struct scsi_qla_host *ha)
7697 {
7698 	uint32_t mbox_cmd[MBOX_REG_COUNT];
7699 	uint32_t mbox_sts[MBOX_REG_COUNT];
7700 	struct addr_ctrl_blk_def *acb = NULL;
7701 	uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
7702 	int rval = QLA_SUCCESS;
7703 	dma_addr_t acb_dma;
7704 
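	/*
	 * Firmware context reset: read the current ACB, disable it, wait for
	 * the disable-ACB completion, then program the saved ACB back.
	 */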
7705 	acb = dma_alloc_coherent(&ha->pdev->dev,
7706 				 sizeof(struct addr_ctrl_blk_def),
7707 				 &acb_dma, GFP_KERNEL);
7708 	if (!acb) {
7709 		ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
7710 			   __func__);
7711 		rval = -ENOMEM;
7712 		goto exit_port_reset;
7713 	}
7714 
7715 	memset(acb, 0, acb_len);
7716 
7717 	rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
7718 	if (rval != QLA_SUCCESS) {
7719 		rval = -EIO;
7720 		goto exit_free_acb;
7721 	}
7722 
7723 	rval = qla4xxx_disable_acb(ha);
7724 	if (rval != QLA_SUCCESS) {
7725 		rval = -EIO;
7726 		goto exit_free_acb;
7727 	}
7728 
7729 	wait_for_completion_timeout(&ha->disable_acb_comp,
7730 				    DISABLE_ACB_TOV * HZ);
7731 
7732 	rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
7733 	if (rval != QLA_SUCCESS) {
7734 		rval = -EIO;
7735 		goto exit_free_acb;
7736 	}
7737 
7738 exit_free_acb:
7739 	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
7740 			  acb, acb_dma);
7741 exit_port_reset:
7742 	DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
7743 			  rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
7744 	return rval;
7745 }
7746 
7747 static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
7748 {
7749 	struct scsi_qla_host *ha = to_qla_host(shost);
7750 	int rval = QLA_SUCCESS;
7751 	uint32_t idc_ctrl;
7752 
7753 	if (ql4xdontresethba) {
7754 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
7755 				  __func__));
7756 		rval = -EPERM;
7757 		goto exit_host_reset;
7758 	}
7759 
7760 	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
7761 		goto recover_adapter;
7762 
7763 	switch (reset_type) {
7764 	case SCSI_ADAPTER_RESET:
7765 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
7766 		break;
7767 	case SCSI_FIRMWARE_RESET:
7768 		if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
7769 			if (is_qla80XX(ha))
7770 				/* set firmware context reset */
7771 				set_bit(DPC_RESET_HA_FW_CONTEXT,
7772 					&ha->dpc_flags);
7773 			else {
7774 				rval = qla4xxx_context_reset(ha);
7775 				goto exit_host_reset;
7776 			}
7777 		}
7778 		break;
7779 	}
7780 
7781 recover_adapter:
7782 	/* For ISP83XX set graceful reset bit in IDC_DRV_CTRL if
7783 	 * reset is issued by application */
7784 	if (is_qla8032(ha) && test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
7785 		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
7786 		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
7787 				 (idc_ctrl | GRACEFUL_RESET_BIT1));
7788 	}
7789 
7790 	rval = qla4xxx_recover_adapter(ha);
7791 	if (rval != QLA_SUCCESS) {
7792 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
7793 				  __func__));
7794 		rval = -EIO;
7795 	}
7796 
7797 exit_host_reset:
7798 	return rval;
7799 }
7800 
7801 /* PCI AER driver recovers from all correctable errors w/o
7802  * driver intervention. For uncorrectable errors PCI AER
7803  * driver calls the following device driver's callbacks
7804  *
7805  * - Fatal Errors - link_reset
7806  * - Non-Fatal Errors - driver's pci_error_detected() which
7807  * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
7808  *
7809  * PCI AER driver calls
7810  * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
7811  *               returns RECOVERED or NEED_RESET if fw_hung
7812  * NEED_RESET - driver's slot_reset()
7813  * DISCONNECT - device is dead & cannot recover
7814  * RECOVERED - driver's pci_resume()
7815  */
7816 static pci_ers_result_t
7817 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7818 {
7819 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
7820 
7821 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: error detected:state %x\n",
7822 	    ha->host_no, __func__, state);
7823 
7824 	if (!is_aer_supported(ha))
7825 		return PCI_ERS_RESULT_NONE;
7826 
7827 	switch (state) {
7828 	case pci_channel_io_normal:
7829 		clear_bit(AF_EEH_BUSY, &ha->flags);
7830 		return PCI_ERS_RESULT_CAN_RECOVER;
7831 	case pci_channel_io_frozen:
7832 		set_bit(AF_EEH_BUSY, &ha->flags);
7833 		qla4xxx_mailbox_premature_completion(ha);
7834 		qla4xxx_free_irqs(ha);
7835 		pci_disable_device(pdev);
7836 		/* Return back all IOs */
7837 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
7838 		return PCI_ERS_RESULT_NEED_RESET;
7839 	case pci_channel_io_perm_failure:
7840 		set_bit(AF_EEH_BUSY, &ha->flags);
7841 		set_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags);
7842 		qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
7843 		return PCI_ERS_RESULT_DISCONNECT;
7844 	}
7845 	return PCI_ERS_RESULT_NEED_RESET;
7846 }
7847 
7848 /**
7849  * qla4xxx_pci_mmio_enabled() gets called if
7850  * qla4xxx_pci_error_detected() returns PCI_ERS_RESULT_CAN_RECOVER
7851  * and read/write to the device still works.
7852  **/
7853 static pci_ers_result_t
7854 qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
7855 {
7856 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
7857 
7858 	if (!is_aer_supported(ha))
7859 		return PCI_ERS_RESULT_NONE;
7860 
7861 	return PCI_ERS_RESULT_RECOVERED;
7862 }
7863 
7864 static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
7865 {
7866 	uint32_t rval = QLA_ERROR;
7867 	int fn;
7868 	struct pci_dev *other_pdev = NULL;
7869 
7870 	ql4_printk(KERN_WARNING, ha, "scsi%ld: In %s\n", ha->host_no, __func__);
7871 
7872 	set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
7873 
7874 	if (test_bit(AF_ONLINE, &ha->flags)) {
7875 		clear_bit(AF_ONLINE, &ha->flags);
7876 		clear_bit(AF_LINK_UP, &ha->flags);
7877 		iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
7878 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
7879 	}
7880 
7881 	fn = PCI_FUNC(ha->pdev->devfn);
7882 	while (fn > 0) {
7883 		fn--;
7884 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at "
7885 		    "func %x\n", ha->host_no, __func__, fn);
7886 		/* Get the pci device given the domain, bus,
7887 		 * slot/function number */
7888 		other_pdev =
7889 		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
7890 		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
7891 		    fn));
7892 
7893 		if (!other_pdev)
7894 			continue;
7895 
7896 		if (atomic_read(&other_pdev->enable_cnt)) {
7897 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI "
7898 			    "func in enabled state%x\n", ha->host_no,
7899 			    __func__, fn);
7900 			pci_dev_put(other_pdev);
7901 			break;
7902 		}
7903 		pci_dev_put(other_pdev);
7904 	}
7905 
7906 	/* The first function on the card, the reset owner will
7907 	 * start & initialize the firmware. The other functions
7908 	 * on the card will reset the firmware context
7909 	 */
7910 	if (!fn) {
7911 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn being reset "
7912 		    "0x%x is the owner\n", ha->host_no, __func__,
7913 		    ha->pdev->devfn);
7914 
7915 		ha->isp_ops->idc_lock(ha);
7916 		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
7917 				    QLA8XXX_DEV_COLD);
7918 		ha->isp_ops->idc_unlock(ha);
7919 
7920 		rval = qla4_8xxx_update_idc_reg(ha);
7921 		if (rval == QLA_ERROR) {
7922 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
7923 				   ha->host_no, __func__);
7924 			ha->isp_ops->idc_lock(ha);
7925 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
7926 					    QLA8XXX_DEV_FAILED);
7927 			ha->isp_ops->idc_unlock(ha);
7928 			goto exit_error_recovery;
7929 		}
7930 
7931 		clear_bit(AF_FW_RECOVERY, &ha->flags);
7932 		rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
7933 
7934 		if (rval != QLA_SUCCESS) {
7935 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
7936 			    "FAILED\n", ha->host_no, __func__);
7937 			ha->isp_ops->idc_lock(ha);
7938 			qla4_8xxx_clear_drv_active(ha);
7939 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
7940 					    QLA8XXX_DEV_FAILED);
7941 			ha->isp_ops->idc_unlock(ha);
7942 		} else {
7943 			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
7944 			    "READY\n", ha->host_no, __func__);
7945 			ha->isp_ops->idc_lock(ha);
7946 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
7947 					    QLA8XXX_DEV_READY);
7948 			/* Clear driver state register */
7949 			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
7950 			qla4_8xxx_set_drv_active(ha);
7951 			ha->isp_ops->idc_unlock(ha);
7952 			ha->isp_ops->enable_intrs(ha);
7953 		}
7954 	} else {
7955 		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
7956 		    "the reset owner\n", ha->host_no, __func__,
7957 		    ha->pdev->devfn);
7958 		if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
7959 		     QLA8XXX_DEV_READY)) {
7960 			clear_bit(AF_FW_RECOVERY, &ha->flags);
7961 			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
7962 			if (rval == QLA_SUCCESS)
7963 				ha->isp_ops->enable_intrs(ha);
7964 
7965 			ha->isp_ops->idc_lock(ha);
7966 			qla4_8xxx_set_drv_active(ha);
7967 			ha->isp_ops->idc_unlock(ha);
7968 		}
7969 	}
7970 exit_error_recovery:
7971 	clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
7972 	return rval;
7973 }
7974 
7975 static pci_ers_result_t
7976 qla4xxx_pci_slot_reset(struct pci_dev *pdev)
7977 {
7978 	pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
7979 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
7980 	int rc;
7981 
7982 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: slot_reset\n",
7983 	    ha->host_no, __func__);
7984 
7985 	if (!is_aer_supported(ha))
7986 		return PCI_ERS_RESULT_NONE;
7987 
7988 	/* Restore the saved state of PCIe device -
7989 	 * BAR registers, PCI Config space, PCIX, MSI,
7990 	 * IOV states
7991 	 */
7992 	pci_restore_state(pdev);
7993 
7994 	/* pci_restore_state() clears the saved_state flag of the device,
7995 	 * so save the state again to set the flag back
7996 	 */
7997 	pci_save_state(pdev);
7998 
7999 	/* Initialize device or resume if in suspended state */
8000 	rc = pci_enable_device(pdev);
8001 	if (rc) {
8002 		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Can't re-enable "
8003 		    "device after reset\n", ha->host_no, __func__);
8004 		goto exit_slot_reset;
8005 	}
8006 
8007 	ha->isp_ops->disable_intrs(ha);
8008 
8009 	if (is_qla80XX(ha)) {
8010 		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
8011 			ret = PCI_ERS_RESULT_RECOVERED;
8012 			goto exit_slot_reset;
8013 		} else
8014 			goto exit_slot_reset;
8015 	}
8016 
8017 exit_slot_reset:
8018 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Return=%x\n",
8019 	    ha->host_no, __func__, ret);
8020 	return ret;
8021 }
8022 
8023 static void
8024 qla4xxx_pci_resume(struct pci_dev *pdev)
8025 {
8026 	struct scsi_qla_host *ha = pci_get_drvdata(pdev);
8027 	int ret;
8028 
8029 	ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: pci_resume\n",
8030 	    ha->host_no, __func__);
8031 
8032 	ret = qla4xxx_wait_for_hba_online(ha);
8033 	if (ret != QLA_SUCCESS) {
8034 		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: the device failed to "
8035 		    "resume I/O from slot/link_reset\n", ha->host_no,
8036 		     __func__);
8037 	}
8038 
8039 	pci_cleanup_aer_uncorrect_error_status(pdev);
8040 	clear_bit(AF_EEH_BUSY, &ha->flags);
8041 }
8042 
8043 static const struct pci_error_handlers qla4xxx_err_handler = {
8044 	.error_detected = qla4xxx_pci_error_detected,
8045 	.mmio_enabled = qla4xxx_pci_mmio_enabled,
8046 	.slot_reset = qla4xxx_pci_slot_reset,
8047 	.resume = qla4xxx_pci_resume,
8048 };
8049 
8050 static struct pci_device_id qla4xxx_pci_tbl[] = {
8051 	{
8052 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8053 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4010,
8054 		.subvendor	= PCI_ANY_ID,
8055 		.subdevice	= PCI_ANY_ID,
8056 	},
8057 	{
8058 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8059 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4022,
8060 		.subvendor	= PCI_ANY_ID,
8061 		.subdevice	= PCI_ANY_ID,
8062 	},
8063 	{
8064 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8065 		.device		= PCI_DEVICE_ID_QLOGIC_ISP4032,
8066 		.subvendor	= PCI_ANY_ID,
8067 		.subdevice	= PCI_ANY_ID,
8068 	},
8069 	{
8070 		.vendor         = PCI_VENDOR_ID_QLOGIC,
8071 		.device         = PCI_DEVICE_ID_QLOGIC_ISP8022,
8072 		.subvendor      = PCI_ANY_ID,
8073 		.subdevice      = PCI_ANY_ID,
8074 	},
8075 	{
8076 		.vendor		= PCI_VENDOR_ID_QLOGIC,
8077 		.device		= PCI_DEVICE_ID_QLOGIC_ISP8324,
8078 		.subvendor	= PCI_ANY_ID,
8079 		.subdevice	= PCI_ANY_ID,
8080 	},
8081 	{0, 0},
8082 };
8083 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
8084 
8085 static struct pci_driver qla4xxx_pci_driver = {
8086 	.name		= DRIVER_NAME,
8087 	.id_table	= qla4xxx_pci_tbl,
8088 	.probe		= qla4xxx_probe_adapter,
8089 	.remove		= qla4xxx_remove_adapter,
8090 	.err_handler = &qla4xxx_err_handler,
8091 };
8092 
8093 static int __init qla4xxx_module_init(void)
8094 {
8095 	int ret;
8096 
8097 	/* Allocate cache for SRBs. */
8098 	srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
8099 				       SLAB_HWCACHE_ALIGN, NULL);
8100 	if (srb_cachep == NULL) {
8101 		printk(KERN_ERR
8102 		       "%s: Unable to allocate SRB cache..."
8103 		       "Failing load!\n", DRIVER_NAME);
8104 		ret = -ENOMEM;
8105 		goto no_srp_cache;
8106 	}
8107 
8108 	/* Derive version string. */
8109 	strcpy(qla4xxx_version_str, QLA4XXX_DRIVER_VERSION);
8110 	if (ql4xextended_error_logging)
8111 		strcat(qla4xxx_version_str, "-debug");
8112 
8113 	qla4xxx_scsi_transport =
8114 		iscsi_register_transport(&qla4xxx_iscsi_transport);
8115 	if (!qla4xxx_scsi_transport) {
8116 		ret = -ENODEV;
8117 		goto release_srb_cache;
8118 	}
8119 
8120 	ret = pci_register_driver(&qla4xxx_pci_driver);
8121 	if (ret)
8122 		goto unregister_transport;
8123 
8124 	printk(KERN_INFO "QLogic iSCSI HBA Driver\n");
8125 	return 0;
8126 
8127 unregister_transport:
8128 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8129 release_srb_cache:
8130 	kmem_cache_destroy(srb_cachep);
8131 no_srp_cache:
8132 	return ret;
8133 }
8134 
8135 static void __exit qla4xxx_module_exit(void)
8136 {
8137 	pci_unregister_driver(&qla4xxx_pci_driver);
8138 	iscsi_unregister_transport(&qla4xxx_iscsi_transport);
8139 	kmem_cache_destroy(srb_cachep);
8140 }
8141 
8142 module_init(qla4xxx_module_init);
8143 module_exit(qla4xxx_module_exit);
8144 
8145 MODULE_AUTHOR("QLogic Corporation");
8146 MODULE_DESCRIPTION("QLogic iSCSI HBA Driver");
8147 MODULE_LICENSE("GPL");
8148 MODULE_VERSION(QLA4XXX_DRIVER_VERSION);
8149