• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/raid_class.h>
56 #include <linux/blk-mq-pci.h>
57 #include <linux/unaligned.h>
58 
59 #include "mpt3sas_base.h"
60 
61 #define RAID_CHANNEL 1
62 
63 #define PCIE_CHANNEL 2
64 
65 /* forward proto's */
66 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67 	struct _sas_node *sas_expander);
68 static void _firmware_event_work(struct work_struct *work);
69 
70 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71 	struct _sas_device *sas_device);
72 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73 	u8 retry_count, u8 is_pd);
74 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76 	struct _pcie_device *pcie_device);
77 static void
78 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
80 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
81 
82 /* global parameters */
83 LIST_HEAD(mpt3sas_ioc_list);
84 /* global ioc lock for list operations */
85 DEFINE_SPINLOCK(gioc_lock);
86 
87 MODULE_AUTHOR(MPT3SAS_AUTHOR);
88 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91 MODULE_ALIAS("mpt2sas");
92 
93 /* local parameters */
94 static u8 scsi_io_cb_idx = -1;
95 static u8 tm_cb_idx = -1;
96 static u8 ctl_cb_idx = -1;
97 static u8 base_cb_idx = -1;
98 static u8 port_enable_cb_idx = -1;
99 static u8 transport_cb_idx = -1;
100 static u8 scsih_cb_idx = -1;
101 static u8 config_cb_idx = -1;
102 static int mpt2_ids;
103 static int mpt3_ids;
104 
105 static u8 tm_tr_cb_idx = -1 ;
106 static u8 tm_tr_volume_cb_idx = -1 ;
107 static u8 tm_sas_control_cb_idx = -1;
108 
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 	" bits for enabling additional logging info (default=0)");
113 
114 
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0444);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
118 
119 
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0444);
122 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
123 
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0444);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
129 
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0444);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 		  1 - enumerates only SAS 2.0 generation HBAs\n \
135 		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
136 
137 /* diag_buffer_enable is bitwise
138  * bit 0 set = TRACE
139  * bit 1 set = SNAPSHOT
140  * bit 2 set = EXTENDED
141  *
142  * Either bit can be set, or both
143  */
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0444);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0444);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
151 
152 
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0444);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157 
158 static bool enable_sdev_max_qd;
159 module_param(enable_sdev_max_qd, bool, 0444);
160 MODULE_PARM_DESC(enable_sdev_max_qd,
161 	"Enable sdev max qd as can_queue, def=disabled(0)");
162 
163 static int multipath_on_hba = -1;
164 module_param(multipath_on_hba, int, 0);
165 MODULE_PARM_DESC(multipath_on_hba,
166 	"Multipath support to add same target device\n\t\t"
167 	"as many times as it is visible to HBA from various paths\n\t\t"
168 	"(by default:\n\t\t"
169 	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
170 	"\t SAS 3.5 HBA - This will be enabled)");
171 
172 static int host_tagset_enable = 1;
173 module_param(host_tagset_enable, int, 0444);
174 MODULE_PARM_DESC(host_tagset_enable,
175 	"Shared host tagset enable/disable Default: enable(1)");
176 
177 /* raid transport support */
178 static struct raid_template *mpt3sas_raid_template;
179 static struct raid_template *mpt2sas_raid_template;
180 
181 
182 /**
183  * struct sense_info - common structure for obtaining sense keys
184  * @skey: sense key
185  * @asc: additional sense code
186  * @ascq: additional sense code qualifier
187  */
struct sense_info {
	u8 skey;	/* sense key */
	u8 asc;		/* additional sense code */
	u8 ascq;	/* additional sense code qualifier */
};
193 
194 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
195 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
196 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
197 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
198 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
199 
200 /*
201  * SAS Log info code for a NCQ collateral abort after an NCQ error:
202  * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
203  * See: drivers/message/fusion/lsi/mpi_log_sas.h
204  */
205 #define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR	0x31080000
206 
207 /**
208  * struct fw_event_work - firmware event struct
209  * @list: link list framework
210  * @work: work object (ioc->fault_reset_work_q)
211  * @ioc: per adapter object
212  * @device_handle: device handle
213  * @VF_ID: virtual function id
214  * @VP_ID: virtual port id
215  * @ignore: flag meaning this event has been marked to ignore
216  * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
217  * @refcount: kref for this event
218  * @event_data: reply event data payload follows
219  *
220  * This object stored on ioc->fw_event_list.
221  */
struct fw_event_work {
	struct list_head	list;		/* linked on ioc->fw_event_list */
	struct work_struct	work;		/* queued on ioc->firmware_event_thread */

	struct MPT3SAS_ADAPTER *ioc;		/* owning adapter */
	u16			device_handle;	/* firmware-assigned device handle */
	u8			VF_ID;		/* virtual function id */
	u8			VP_ID;		/* virtual port id */
	u8			ignore;		/* set to drop this event unprocessed */
	u16			event;		/* MPI2_EVENT_XXX code (mpi2_ioc.h) */
	struct kref		refcount;	/* released via fw_event_work_free() */
	/* variable-length reply event data payload, kept 4-byte aligned */
	char			event_data[] __aligned(4);
};
235 
fw_event_work_free(struct kref * r)236 static void fw_event_work_free(struct kref *r)
237 {
238 	kfree(container_of(r, struct fw_event_work, refcount));
239 }
240 
/**
 * fw_event_work_get - take an additional reference on a fw event work item
 * @fw_work: firmware event work object
 */
static void fw_event_work_get(struct fw_event_work *fw_work)
{
	kref_get(&fw_work->refcount);
}
245 
/**
 * fw_event_work_put - drop a reference on a fw event work item
 * @fw_work: firmware event work object
 *
 * Frees the object via fw_event_work_free() when the count reaches zero.
 */
static void fw_event_work_put(struct fw_event_work *fw_work)
{
	kref_put(&fw_work->refcount, fw_event_work_free);
}
250 
alloc_fw_event_work(int len)251 static struct fw_event_work *alloc_fw_event_work(int len)
252 {
253 	struct fw_event_work *fw_event;
254 
255 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
256 	if (!fw_event)
257 		return NULL;
258 
259 	kref_init(&fw_event->refcount);
260 	return fw_event;
261 }
262 
263 /**
264  * struct _scsi_io_transfer - scsi io transfer
265  * @handle: sas device handle (assigned by firmware)
266  * @is_raid: flag set for hidden raid components
267  * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
268  * @data_length: data transfer length
269  * @data_dma: dma pointer to data
270  * @sense: sense data
271  * @lun: lun number
272  * @cdb_length: cdb length
273  * @cdb: cdb contents
274  * @timeout: timeout for this command
275  * @VF_ID: virtual function id
276  * @VP_ID: virtual port id
277  * @valid_reply: flag set for reply message
278  * @sense_length: sense length
279  * @ioc_status: ioc status
280  * @scsi_state: scsi state
 * @scsi_status: scsi status
282  * @log_info: log information
283  * @transfer_length: data length transfer when there is a reply message
284  *
285  * Used for sending internal scsi commands to devices within this module.
286  * Refer to _scsi_send_scsi_io().
287  */
struct _scsi_io_transfer {
	u16	handle;		/* firmware-assigned device handle */
	u8	is_raid;	/* set for hidden raid components */
	enum dma_data_direction dir;
	u32	data_length;
	dma_addr_t data_dma;	/* DMA address of the data buffer */
	u8	sense[SCSI_SENSE_BUFFERSIZE];
	u32	lun;
	u8	cdb_length;
	u8	cdb[32];
	u8	timeout;	/* assumed seconds -- TODO confirm against _scsi_send_scsi_io() */
	u8	VF_ID;		/* virtual function id */
	u8	VP_ID;		/* virtual port id */
	u8	valid_reply;
  /* the following bits are only valid when 'valid_reply = 1' */
	u32	sense_length;
	u16	ioc_status;
	u8	scsi_state;
	u8	scsi_status;
	u32	log_info;
	u32	transfer_length;
};
310 
311 /**
312  * _scsih_set_debug_level - global setting of ioc->logging_level.
313  * @val: value of the parameter to be set
314  * @kp: pointer to kernel_param structure
315  *
316  * Note: The logging levels are defined in mpt3sas_debug.h.
317  */
318 static int
_scsih_set_debug_level(const char * val,const struct kernel_param * kp)319 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
320 {
321 	int ret = param_set_int(val, kp);
322 	struct MPT3SAS_ADAPTER *ioc;
323 
324 	if (ret)
325 		return ret;
326 
327 	pr_info("setting logging_level(0x%08x)\n", logging_level);
328 	spin_lock(&gioc_lock);
329 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
330 		ioc->logging_level = logging_level;
331 	spin_unlock(&gioc_lock);
332 	return 0;
333 }
334 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
335 	&logging_level, 0644);
336 
337 /**
338  * _scsih_srch_boot_sas_address - search based on sas_address
339  * @sas_address: sas address
340  * @boot_device: boot device object from bios page 2
341  *
342  * Return: 1 when there's a match, 0 means no match.
343  */
344 static inline int
_scsih_srch_boot_sas_address(u64 sas_address,Mpi2BootDeviceSasWwid_t * boot_device)345 _scsih_srch_boot_sas_address(u64 sas_address,
346 	Mpi2BootDeviceSasWwid_t *boot_device)
347 {
348 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
349 }
350 
351 /**
352  * _scsih_srch_boot_device_name - search based on device name
 * @device_name: device name specified in IDENTIFY frame
354  * @boot_device: boot device object from bios page 2
355  *
356  * Return: 1 when there's a match, 0 means no match.
357  */
358 static inline int
_scsih_srch_boot_device_name(u64 device_name,Mpi2BootDeviceDeviceName_t * boot_device)359 _scsih_srch_boot_device_name(u64 device_name,
360 	Mpi2BootDeviceDeviceName_t *boot_device)
361 {
362 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
363 }
364 
365 /**
366  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
367  * @enclosure_logical_id: enclosure logical id
368  * @slot_number: slot number
369  * @boot_device: boot device object from bios page 2
370  *
371  * Return: 1 when there's a match, 0 means no match.
372  */
373 static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id,u16 slot_number,Mpi2BootDeviceEnclosureSlot_t * boot_device)374 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
375 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
376 {
377 	return (enclosure_logical_id == le64_to_cpu(boot_device->
378 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
379 	    SlotNumber)) ? 1 : 0;
380 }
381 
382 /**
383  * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
384  *			  port number from port list
385  * @ioc: per adapter object
386  * @port_id: port number
387  * @bypass_dirty_port_flag: when set look the matching hba port entry even
388  *			if hba port entry is marked as dirty.
389  *
390  * Search for hba port entry corresponding to provided port number,
391  * if available return port object otherwise return NULL.
392  */
393 struct hba_port *
mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 bypass_dirty_port_flag)394 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
395 	u8 port_id, u8 bypass_dirty_port_flag)
396 {
397 	struct hba_port *port, *port_next;
398 
399 	/*
400 	 * When multipath_on_hba is disabled then
401 	 * search the hba_port entry using default
402 	 * port id i.e. 255
403 	 */
404 	if (!ioc->multipath_on_hba)
405 		port_id = MULTIPATH_DISABLED_PORT_ID;
406 
407 	list_for_each_entry_safe(port, port_next,
408 	    &ioc->port_table_list, list) {
409 		if (port->port_id != port_id)
410 			continue;
411 		if (bypass_dirty_port_flag)
412 			return port;
413 		if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
414 			continue;
415 		return port;
416 	}
417 
418 	/*
419 	 * Allocate hba_port object for default port id (i.e. 255)
420 	 * when multipath_on_hba is disabled for the HBA.
421 	 * And add this object to port_table_list.
422 	 */
423 	if (!ioc->multipath_on_hba) {
424 		port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
425 		if (!port)
426 			return NULL;
427 
428 		port->port_id = port_id;
429 		ioc_info(ioc,
430 		   "hba_port entry: %p, port: %d is added to hba_port list\n",
431 		   port, port->port_id);
432 		list_add_tail(&port->list,
433 		    &ioc->port_table_list);
434 		return port;
435 	}
436 	return NULL;
437 }
438 
439 /**
440  * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
441  * @ioc: per adapter object
442  * @port: hba_port object
443  * @phy: phy number
444  *
445  * Return virtual_phy object corresponding to phy number.
446  */
447 struct virtual_phy *
mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port,u32 phy)448 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
449 	struct hba_port *port, u32 phy)
450 {
451 	struct virtual_phy *vphy, *vphy_next;
452 
453 	if (!port->vphys_mask)
454 		return NULL;
455 
456 	list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
457 		if (vphy->phy_mask & (1 << phy))
458 			return vphy;
459 	}
460 	return NULL;
461 }
462 
463 /**
464  * _scsih_is_boot_device - search for matching boot device.
465  * @sas_address: sas address
 * @device_name: device name specified in IDENTIFY frame
467  * @enclosure_logical_id: enclosure logical id
468  * @slot: slot number
469  * @form: specifies boot device form
470  * @boot_device: boot device object from bios page 2
471  *
472  * Return: 1 when there's a match, 0 means no match.
473  */
474 static int
_scsih_is_boot_device(u64 sas_address,u64 device_name,u64 enclosure_logical_id,u16 slot,u8 form,Mpi2BiosPage2BootDevice_t * boot_device)475 _scsih_is_boot_device(u64 sas_address, u64 device_name,
476 	u64 enclosure_logical_id, u16 slot, u8 form,
477 	Mpi2BiosPage2BootDevice_t *boot_device)
478 {
479 	int rc = 0;
480 
481 	switch (form) {
482 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
483 		if (!sas_address)
484 			break;
485 		rc = _scsih_srch_boot_sas_address(
486 		    sas_address, &boot_device->SasWwid);
487 		break;
488 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
489 		if (!enclosure_logical_id)
490 			break;
491 		rc = _scsih_srch_boot_encl_slot(
492 		    enclosure_logical_id,
493 		    slot, &boot_device->EnclosureSlot);
494 		break;
495 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
496 		if (!device_name)
497 			break;
498 		rc = _scsih_srch_boot_device_name(
499 		    device_name, &boot_device->DeviceName);
500 		break;
501 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
502 		break;
503 	}
504 
505 	return rc;
506 }
507 
508 /**
509  * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
511  * @handle: device handle
512  * @sas_address: sas address
513  *
514  * Return: 0 success, non-zero when failure
515  */
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	*sas_address = 0;

	/* read SAS Device Page 0 for this handle from the firmware */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/* For HBA, vSES doesn't return HBA SAS address. Instead return
		 * vSES's sas address.
		 */
		if ((handle <= ioc->sas_hba.num_phys) &&
		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
		   MPI2_SAS_DEVICE_INFO_SEP)))
			*sas_address = ioc->sas_hba.sas_address;
		else
			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* we hit this because the given parent handle doesn't exist */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* else error case */
	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
		handle, ioc_status, __FILE__, __LINE__, __func__);
	return -EIO;
}
556 
557 /**
558  * _scsih_determine_boot_device - determine boot device.
559  * @ioc: per adapter object
560  * @device: sas_device or pcie_device object
561  * @channel: SAS or PCIe channel
562  *
 * Determines whether this device should be the first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
565  * There are primary, alternate, and current entries in bios page 2. The order
566  * priority is primary, alternate, then current.  This routine saves
567  * the corresponding device object.
568  * The saved data to be used later in _scsih_probe_boot_devices().
569  */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	 /* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	 /* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/*
	 * Normalize the identification tuple by channel: RAID volumes and
	 * PCIe devices only carry a wwid, while SAS devices provide the
	 * full address/name/enclosure/slot set.
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* primary entry: requested boot device from bios page 2 */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	/* alternate entry: requested alternate boot device */
	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	/* current entry: the boot device in use */
	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}
652 
653 static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)654 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
655 		struct MPT3SAS_TARGET *tgt_priv)
656 {
657 	struct _sas_device *ret;
658 
659 	assert_spin_locked(&ioc->sas_device_lock);
660 
661 	ret = tgt_priv->sas_dev;
662 	if (ret)
663 		sas_device_get(ret);
664 
665 	return ret;
666 }
667 
668 static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)669 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
670 		struct MPT3SAS_TARGET *tgt_priv)
671 {
672 	struct _sas_device *ret;
673 	unsigned long flags;
674 
675 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
676 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
677 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
678 
679 	return ret;
680 }
681 
682 static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)683 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
684 	struct MPT3SAS_TARGET *tgt_priv)
685 {
686 	struct _pcie_device *ret;
687 
688 	assert_spin_locked(&ioc->pcie_device_lock);
689 
690 	ret = tgt_priv->pcie_dev;
691 	if (ret)
692 		pcie_device_get(ret);
693 
694 	return ret;
695 }
696 
697 /**
698  * mpt3sas_get_pdev_from_target - pcie device search
699  * @ioc: per adapter object
700  * @tgt_priv: starget private object
701  *
702  * Context: This function will acquire ioc->pcie_device_lock and will release
703  * before returning the pcie_device object.
704  *
705  * This searches for pcie_device from target, then return pcie_device object.
706  */
707 static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)708 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
709 	struct MPT3SAS_TARGET *tgt_priv)
710 {
711 	struct _pcie_device *ret;
712 	unsigned long flags;
713 
714 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
715 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
716 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
717 
718 	return ret;
719 }
720 
721 
722 /**
723  * __mpt3sas_get_sdev_by_rphy - sas device search
724  * @ioc: per adapter object
725  * @rphy: sas_rphy pointer
726  *
727  * Context: This function will acquire ioc->sas_device_lock and will release
728  * before returning the sas_device object.
729  *
730  * This searches for sas_device from rphy object
731  * then return sas_device object.
732  */
733 struct _sas_device *
__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER * ioc,struct sas_rphy * rphy)734 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
735 	struct sas_rphy *rphy)
736 {
737 	struct _sas_device *sas_device;
738 
739 	assert_spin_locked(&ioc->sas_device_lock);
740 
741 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
742 		if (sas_device->rphy != rphy)
743 			continue;
744 		sas_device_get(sas_device);
745 		return sas_device;
746 	}
747 
748 	sas_device = NULL;
749 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
750 		if (sas_device->rphy != rphy)
751 			continue;
752 		sas_device_get(sas_device);
753 		return sas_device;
754 	}
755 
756 	return NULL;
757 }
758 
759 /**
760  * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
761  *				sas address from sas_device_list list
762  * @ioc: per adapter object
763  * @sas_address: device sas address
764  * @port: port number
765  *
766  * Search for _sas_device object corresponding to provided sas address,
767  * if available return _sas_device object address otherwise return NULL.
768  */
769 struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)770 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
771 	u64 sas_address, struct hba_port *port)
772 {
773 	struct _sas_device *sas_device;
774 
775 	if (!port)
776 		return NULL;
777 
778 	assert_spin_locked(&ioc->sas_device_lock);
779 
780 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
781 		if (sas_device->sas_address != sas_address)
782 			continue;
783 		if (sas_device->port != port)
784 			continue;
785 		sas_device_get(sas_device);
786 		return sas_device;
787 	}
788 
789 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
790 		if (sas_device->sas_address != sas_address)
791 			continue;
792 		if (sas_device->port != port)
793 			continue;
794 		sas_device_get(sas_device);
795 		return sas_device;
796 	}
797 
798 	return NULL;
799 }
800 
801 /**
802  * mpt3sas_get_sdev_by_addr - sas device search
803  * @ioc: per adapter object
804  * @sas_address: sas address
805  * @port: hba port entry
 * Context: This function acquires ioc->sas_device_lock and releases it
 * before returning the sas_device object.
807  *
808  * This searches for sas_device based on sas_address & port number,
809  * then return sas_device object.
810  */
811 struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)812 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
813 	u64 sas_address, struct hba_port *port)
814 {
815 	struct _sas_device *sas_device;
816 	unsigned long flags;
817 
818 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
819 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
820 	    sas_address, port);
821 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
822 
823 	return sas_device;
824 }
825 
826 static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)827 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
828 {
829 	struct _sas_device *sas_device;
830 
831 	assert_spin_locked(&ioc->sas_device_lock);
832 
833 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
834 		if (sas_device->handle == handle)
835 			goto found_device;
836 
837 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
838 		if (sas_device->handle == handle)
839 			goto found_device;
840 
841 	return NULL;
842 
843 found_device:
844 	sas_device_get(sas_device);
845 	return sas_device;
846 }
847 
848 /**
849  * mpt3sas_get_sdev_by_handle - sas device search
850  * @ioc: per adapter object
851  * @handle: sas device handle (assigned by firmware)
 * Context: This function acquires ioc->sas_device_lock and releases it
 * before returning the sas_device object.
 *
 * This searches for sas_device based on handle, then return sas_device
 * object.
856  */
857 struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)858 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
859 {
860 	struct _sas_device *sas_device;
861 	unsigned long flags;
862 
863 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
864 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
865 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
866 
867 	return sas_device;
868 }
869 
870 /**
871  * _scsih_display_enclosure_chassis_info - display device location info
872  * @ioc: per adapter object
873  * @sas_device: per sas device object
874  * @sdev: scsi device struct
875  * @starget: scsi target struct
876  */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	/* prefer the sdev printk context, then starget, else the ioc */
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		/* no scsi device/target context available; log via ioc */
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}
927 
928 /**
929  * _scsih_sas_device_remove - remove sas_device from list.
930  * @ioc: per adapter object
931  * @sas_device: the sas_device object
932  * Context: This function will acquire ioc->sas_device_lock.
933  *
934  * If sas_device is on the list, remove it and decrement its reference count.
935  */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		/* Drop the reference held on behalf of the list. */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
960 
961 /**
962  * _scsih_device_remove_by_handle - removing device object by handle
963  * @ioc: per adapter object
964  * @handle: device handle
965  */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* Handles are being reassigned during host reset; skip removal. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* Unlink and drop the reference held by the list. */
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		/* Tear down outside the lock, then drop the lookup ref. */
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
987 
988 /**
989  * mpt3sas_device_remove_by_sas_address - removing device object by
990  *					sas address & port number
991  * @ioc: per adapter object
992  * @sas_address: device sas_address
993  * @port: hba port entry
994  *
995  * Return nothing.
996  */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* Handles are being reassigned during host reset; skip removal. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
	if (sas_device) {
		/* Unlink and drop the reference held by the list. */
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		/* Tear down outside the lock, then drop the lookup ref. */
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
1019 
1020 /**
1021  * _scsih_sas_device_add - insert sas_device to the list.
1022  * @ioc: per adapter object
1023  * @sas_device: the sas_device object
1024  * Context: This function will acquire ioc->sas_device_lock.
1025  *
1026  * Adding new object to the ioc->sas_device_list.
1027  */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Take a reference on behalf of ioc->sas_device_list. */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* hide_drives set: skip transport/port registration entirely. */
	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	     sas_device->sas_address_parent, sas_device->port)) {
		/* Transport registration failed; undo the list insertion. */
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When async scanning is enabled, it's not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			/* No SCSI target materialized; roll back the port. */
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
1071 
1072 /**
1073  * _scsih_sas_device_init_add - insert sas_device to the list.
1074  * @ioc: per adapter object
1075  * @sas_device: the sas_device object
1076  * Context: This function will acquire ioc->sas_device_lock.
1077  *
1078  * Adding new object at driver load time to the ioc->sas_device_init_list.
1079  */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Take a reference on behalf of ioc->sas_device_init_list. */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	/* Evaluate this device as a boot-device candidate (channel 0). */
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
1100 
1101 
1102 static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1103 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1104 {
1105 	struct _pcie_device *pcie_device;
1106 
1107 	assert_spin_locked(&ioc->pcie_device_lock);
1108 
1109 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1110 		if (pcie_device->wwid == wwid)
1111 			goto found_device;
1112 
1113 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1114 		if (pcie_device->wwid == wwid)
1115 			goto found_device;
1116 
1117 	return NULL;
1118 
1119 found_device:
1120 	pcie_device_get(pcie_device);
1121 	return pcie_device;
1122 }
1123 
1124 
1125 /**
1126  * mpt3sas_get_pdev_by_wwid - pcie device search
1127  * @ioc: per adapter object
1128  * @wwid: wwid
1129  *
1130  * Context: This function will acquire ioc->pcie_device_lock and will release
1131  * before returning the pcie_device object.
1132  *
1133  * This searches for pcie_device based on wwid, then return pcie_device object.
1134  */
1135 static struct _pcie_device *
mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1136 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1137 {
1138 	struct _pcie_device *pcie_device;
1139 	unsigned long flags;
1140 
1141 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1142 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1143 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1144 
1145 	return pcie_device;
1146 }
1147 
1148 
1149 static struct _pcie_device *
__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1150 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1151 	int channel)
1152 {
1153 	struct _pcie_device *pcie_device;
1154 
1155 	assert_spin_locked(&ioc->pcie_device_lock);
1156 
1157 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1158 		if (pcie_device->id == id && pcie_device->channel == channel)
1159 			goto found_device;
1160 
1161 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1162 		if (pcie_device->id == id && pcie_device->channel == channel)
1163 			goto found_device;
1164 
1165 	return NULL;
1166 
1167 found_device:
1168 	pcie_device_get(pcie_device);
1169 	return pcie_device;
1170 }
1171 
1172 static struct _pcie_device *
__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1173 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1174 {
1175 	struct _pcie_device *pcie_device;
1176 
1177 	assert_spin_locked(&ioc->pcie_device_lock);
1178 
1179 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1180 		if (pcie_device->handle == handle)
1181 			goto found_device;
1182 
1183 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1184 		if (pcie_device->handle == handle)
1185 			goto found_device;
1186 
1187 	return NULL;
1188 
1189 found_device:
1190 	pcie_device_get(pcie_device);
1191 	return pcie_device;
1192 }
1193 
1194 
1195 /**
1196  * mpt3sas_get_pdev_by_handle - pcie device search
1197  * @ioc: per adapter object
1198  * @handle: Firmware device handle
1199  *
1200  * Context: This function will acquire ioc->pcie_device_lock and will release
1201  * before returning the pcie_device object.
1202  *
1203  * This searches for pcie_device based on handle, then return pcie_device
1204  * object.
1205  */
1206 struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1207 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1208 {
1209 	struct _pcie_device *pcie_device;
1210 	unsigned long flags;
1211 
1212 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1213 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1214 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1215 
1216 	return pcie_device;
1217 }
1218 
1219 /**
1220  * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1221  * @ioc: per adapter object
1222  * Context: This function will acquire ioc->pcie_device_lock
1223  *
1224  * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1225  * which has reported maximum among all available NVMe drives.
1226  * Minimum max_shutdown_latency will be six seconds.
1227  */
1228 static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER * ioc)1229 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1230 {
1231 	struct _pcie_device *pcie_device;
1232 	unsigned long flags;
1233 	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1234 
1235 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1236 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1237 		if (pcie_device->shutdown_latency) {
1238 			if (shutdown_latency < pcie_device->shutdown_latency)
1239 				shutdown_latency =
1240 					pcie_device->shutdown_latency;
1241 		}
1242 	}
1243 	ioc->max_shutdown_latency = shutdown_latency;
1244 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1245 }
1246 
1247 /**
1248  * _scsih_pcie_device_remove - remove pcie_device from list.
1249  * @ioc: per adapter object
1250  * @pcie_device: the pcie_device object
1251  * Context: This function will acquire ioc->pcie_device_lock.
1252  *
1253  * If pcie_device is on the list, remove it and decrement its reference count.
1254  */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* Unlink under the lock; the entry may already have been removed. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	/* If this drive set the IOC-wide maximum, it must be recomputed. */
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		/* Free the serial number and drop the list's reference. */
		kfree(pcie_device->serial_number);
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1297 
1298 
1299 /**
1300  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1301  * @ioc: per adapter object
1302  * @handle: device handle
1303  */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	/* Handles are being reassigned during host reset; skip removal. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			/* Unlink and drop the reference held by the list. */
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			pcie_device_put(pcie_device);
		}
		/* If this drive set the IOC-wide maximum, recompute it. */
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		/* Detach from the SCSI midlayer outside the lock, then
		 * drop the reference taken by the lookup above. */
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1340 
1341 /**
1342  * _scsih_pcie_device_add - add pcie_device object
1343  * @ioc: per adapter object
1344  * @pcie_device: pcie_device object
1345  *
1346  * This is added to the pcie_device_list link list.
1347  */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Take a reference on behalf of ioc->pcie_device_list. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	/* Blocked devices are tracked but not exposed to the SCSI layer. */
	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		/* scsi_add_device() failed; undo the list insertion. */
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}
1390 
1391 /*
1392  * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1393  * @ioc: per adapter object
1394  * @pcie_device: the pcie_device object
1395  * Context: This function will acquire ioc->pcie_device_lock.
1396  *
1397  * Adding new object at driver load time to the ioc->pcie_device_init_list.
1398  */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
				struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Take a reference on behalf of ioc->pcie_device_init_list. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	/* Blocked devices are not considered as boot-device candidates. */
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
1429 /**
1430  * _scsih_raid_device_find_by_id - raid device search
1431  * @ioc: per adapter object
1432  * @id: sas device target id
1433  * @channel: sas device channel
1434  * Context: Calling function should acquire ioc->raid_device_lock
1435  *
1436  * This searches for raid_device based on target id, then return raid_device
1437  * object.
1438  */
1439 static struct _raid_device *
_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1440 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1441 {
1442 	struct _raid_device *raid_device, *r;
1443 
1444 	r = NULL;
1445 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1446 		if (raid_device->id == id && raid_device->channel == channel) {
1447 			r = raid_device;
1448 			goto out;
1449 		}
1450 	}
1451 
1452  out:
1453 	return r;
1454 }
1455 
1456 /**
1457  * mpt3sas_raid_device_find_by_handle - raid device search
1458  * @ioc: per adapter object
1459  * @handle: sas device handle (assigned by firmware)
1460  * Context: Calling function should acquire ioc->raid_device_lock
1461  *
1462  * This searches for raid_device based on handle, then return raid_device
1463  * object.
1464  */
1465 struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1466 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1467 {
1468 	struct _raid_device *raid_device, *r;
1469 
1470 	r = NULL;
1471 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1472 		if (raid_device->handle != handle)
1473 			continue;
1474 		r = raid_device;
1475 		goto out;
1476 	}
1477 
1478  out:
1479 	return r;
1480 }
1481 
1482 /**
1483  * _scsih_raid_device_find_by_wwid - raid device search
1484  * @ioc: per adapter object
1485  * @wwid: world wide identifier (WWID) of the RAID volume
1486  * Context: Calling function should acquire ioc->raid_device_lock
1487  *
1488  * This searches for raid_device based on wwid, then return raid_device
1489  * object.
1490  */
1491 static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1492 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1493 {
1494 	struct _raid_device *raid_device, *r;
1495 
1496 	r = NULL;
1497 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1498 		if (raid_device->wwid != wwid)
1499 			continue;
1500 		r = raid_device;
1501 		goto out;
1502 	}
1503 
1504  out:
1505 	return r;
1506 }
1507 
1508 /**
1509  * _scsih_raid_device_add - add raid_device object
1510  * @ioc: per adapter object
1511  * @raid_device: raid_device object
1512  *
1513  * This is added to the raid_device_list link list.
1514  */
1515 static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1516 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1517 	struct _raid_device *raid_device)
1518 {
1519 	unsigned long flags;
1520 
1521 	dewtprintk(ioc,
1522 		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1523 			    __func__,
1524 			    raid_device->handle, (u64)raid_device->wwid));
1525 
1526 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1527 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
1528 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1529 }
1530 
1531 /**
1532  * _scsih_raid_device_remove - delete raid_device object
1533  * @ioc: per adapter object
1534  * @raid_device: raid_device object
1535  *
1536  */
1537 static void
_scsih_raid_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1538 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1539 	struct _raid_device *raid_device)
1540 {
1541 	unsigned long flags;
1542 
1543 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1544 	list_del(&raid_device->list);
1545 	kfree(raid_device);
1546 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1547 }
1548 
1549 /**
1550  * mpt3sas_scsih_expander_find_by_handle - expander device search
1551  * @ioc: per adapter object
1552  * @handle: expander handle (assigned by firmware)
1553  * Context: Calling function should acquire ioc->sas_device_lock
1554  *
1555  * This searches for expander device based on handle, then returns the
1556  * sas_node object.
1557  */
1558 struct _sas_node *
mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1559 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1560 {
1561 	struct _sas_node *sas_expander, *r;
1562 
1563 	r = NULL;
1564 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1565 		if (sas_expander->handle != handle)
1566 			continue;
1567 		r = sas_expander;
1568 		goto out;
1569 	}
1570  out:
1571 	return r;
1572 }
1573 
1574 /**
1575  * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1576  * @ioc: per adapter object
1577  * @handle: enclosure handle (assigned by firmware)
1578  * Context: Calling function should acquire ioc->sas_device_lock
1579  *
1580  * This searches for enclosure device based on handle, then returns the
1581  * enclosure object.
1582  */
1583 static struct _enclosure_node *
mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1584 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1585 {
1586 	struct _enclosure_node *enclosure_dev, *r;
1587 
1588 	r = NULL;
1589 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1590 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1591 			continue;
1592 		r = enclosure_dev;
1593 		goto out;
1594 	}
1595 out:
1596 	return r;
1597 }
1598 /**
1599  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1600  * @ioc: per adapter object
1601  * @sas_address: sas address
1602  * @port: hba port entry
1603  * Context: Calling function should acquire ioc->sas_node_lock.
1604  *
1605  * This searches for expander device based on sas_address & port number,
1606  * then returns the sas_node object.
1607  */
1608 struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)1609 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1610 	u64 sas_address, struct hba_port *port)
1611 {
1612 	struct _sas_node *sas_expander, *r = NULL;
1613 
1614 	if (!port)
1615 		return r;
1616 
1617 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1618 		if (sas_expander->sas_address != sas_address)
1619 			continue;
1620 		if (sas_expander->port != port)
1621 			continue;
1622 		r = sas_expander;
1623 		goto out;
1624 	}
1625  out:
1626 	return r;
1627 }
1628 
1629 /**
1630  * _scsih_expander_node_add - insert expander device to the list.
1631  * @ioc: per adapter object
1632  * @sas_expander: the sas_device object
1633  * Context: This function will acquire ioc->sas_node_lock.
1634  *
1635  * Adding new object to the ioc->sas_expander_list.
1636  */
1637 static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)1638 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1639 	struct _sas_node *sas_expander)
1640 {
1641 	unsigned long flags;
1642 
1643 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
1644 	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1645 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1646 }
1647 
1648 /**
1649  * _scsih_is_end_device - determines if device is an end device
1650  * @device_info: bitfield providing information about the device.
1651  * Context: none
1652  *
1653  * Return: 1 if end device.
1654  */
1655 static int
_scsih_is_end_device(u32 device_info)1656 _scsih_is_end_device(u32 device_info)
1657 {
1658 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1659 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1660 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1661 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1662 		return 1;
1663 	else
1664 		return 0;
1665 }
1666 
1667 /**
1668  * _scsih_is_nvme_pciescsi_device - determines if
1669  *			device is an pcie nvme/scsi device
1670  * @device_info: bitfield providing information about the device.
1671  * Context: none
1672  *
1673  * Returns 1 if device is pcie device type nvme/scsi.
1674  */
1675 static int
_scsih_is_nvme_pciescsi_device(u32 device_info)1676 _scsih_is_nvme_pciescsi_device(u32 device_info)
1677 {
1678 	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1679 	    == MPI26_PCIE_DEVINFO_NVME) ||
1680 	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1681 	    == MPI26_PCIE_DEVINFO_SCSI))
1682 		return 1;
1683 	else
1684 		return 0;
1685 }
1686 
1687 /**
1688  * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1689  * @ioc: per adapter object
1690  * @id: target id
1691  * @channel: channel
1692  * Context: This function will acquire ioc->scsi_lookup_lock.
1693  *
1694  * This will search for a matching channel:id in the scsi_lookup array,
1695  * returning 1 if found.
1696  */
1697 static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1698 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1699 	int channel)
1700 {
1701 	int smid;
1702 	struct scsi_cmnd *scmd;
1703 
1704 	for (smid = 1;
1705 	     smid <= ioc->shost->can_queue; smid++) {
1706 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1707 		if (!scmd)
1708 			continue;
1709 		if (scmd->device->id == id &&
1710 		    scmd->device->channel == channel)
1711 			return 1;
1712 	}
1713 	return 0;
1714 }
1715 
1716 /**
1717  * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1718  * @ioc: per adapter object
1719  * @id: target id
1720  * @lun: lun number
1721  * @channel: channel
1722  * Context: This function will acquire ioc->scsi_lookup_lock.
1723  *
1724  * This will search for a matching channel:id:lun in the scsi_lookup array,
1725  * returning 1 if found.
1726  */
1727 static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER * ioc,int id,unsigned int lun,int channel)1728 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1729 	unsigned int lun, int channel)
1730 {
1731 	int smid;
1732 	struct scsi_cmnd *scmd;
1733 
1734 	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1735 
1736 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1737 		if (!scmd)
1738 			continue;
1739 		if (scmd->device->id == id &&
1740 		    scmd->device->channel == channel &&
1741 		    scmd->device->lun == lun)
1742 			return 1;
1743 	}
1744 	return 0;
1745 }
1746 
1747 /**
1748  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1749  * @ioc: per adapter object
1750  * @smid: system request message index
1751  *
1752  * Return: the smid stored scmd pointer.
1753  * Then will dereference the stored scmd pointer.
1754  */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	u16 tag = smid - 1;	/* smids are 1-based; blk-mq tags 0-based */

	/*
	 * Only SCSI IO smids qualify; the top INTERNAL_SCSIIO_CMDS_COUNT
	 * entries of the scsiio range are reserved for driver-internal
	 * commands and never carry a scmd.
	 */
	if (smid > 0  &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/* Recompose the blk-mq unique tag: (hw queue << bits) | tag */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle field must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* Reject trackers not actively owned by the driver. */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}
1788 
1789 /**
1790  * scsih_change_queue_depth - setting device queue depth
1791  * @sdev: scsi device struct
1792  * @qdepth: requested queue depth
1793  *
1794  * Return: queue depth.
1795  */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
	 * is disabled.
	 */
	if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
		goto not_sata;

	/* Walk the priv-data chain; any missing link means the device
	 * can't be classified as SATA, so keep the host-wide depth. */
	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	/* Volumes are skipped; the SATA cap applies to end devices only. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;

		/* Drop the reference taken by the lookup helper. */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:

	/* Untagged devices can only queue a single command. */
	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
	    sdev->queue_depth, sdev->tagged_supported,
	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}
1848 
1849 /**
1850  * mpt3sas_scsih_change_queue_depth - setting device queue depth
1851  * @sdev: scsi device struct
1852  * @qdepth: requested queue depth
1853  *
1854  * Returns nothing.
1855  */
1856 void
mpt3sas_scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1857 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1858 {
1859 	struct Scsi_Host *shost = sdev->host;
1860 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1861 
1862 	if (ioc->enable_sdev_max_qd)
1863 		qdepth = shost->can_queue;
1864 
1865 	scsih_change_queue_depth(sdev, qdepth);
1866 }
1867 
1868 /**
1869  * scsih_target_alloc - target add routine
1870  * @starget: scsi target struct
1871  *
1872  * Return: 0 if ok. Any other return is assumed to be an error and
1873  * the device is ignored.
1874  */
1875 static int
scsih_target_alloc(struct scsi_target * starget)1876 scsih_target_alloc(struct scsi_target *starget)
1877 {
1878 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1879 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1880 	struct MPT3SAS_TARGET *sas_target_priv_data;
1881 	struct _sas_device *sas_device;
1882 	struct _raid_device *raid_device;
1883 	struct _pcie_device *pcie_device;
1884 	unsigned long flags;
1885 	struct sas_rphy *rphy;
1886 
1887 	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1888 				       GFP_KERNEL);
1889 	if (!sas_target_priv_data)
1890 		return -ENOMEM;
1891 
1892 	starget->hostdata = sas_target_priv_data;
1893 	sas_target_priv_data->starget = starget;
1894 	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1895 
1896 	/* RAID volumes */
1897 	if (starget->channel == RAID_CHANNEL) {
1898 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1899 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1900 		    starget->channel);
1901 		if (raid_device) {
1902 			sas_target_priv_data->handle = raid_device->handle;
1903 			sas_target_priv_data->sas_address = raid_device->wwid;
1904 			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1905 			if (ioc->is_warpdrive)
1906 				sas_target_priv_data->raid_device = raid_device;
1907 			raid_device->starget = starget;
1908 		}
1909 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1910 		return 0;
1911 	}
1912 
1913 	/* PCIe devices */
1914 	if (starget->channel == PCIE_CHANNEL) {
1915 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1916 		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1917 			starget->channel);
1918 		if (pcie_device) {
1919 			sas_target_priv_data->handle = pcie_device->handle;
1920 			sas_target_priv_data->sas_address = pcie_device->wwid;
1921 			sas_target_priv_data->port = NULL;
1922 			sas_target_priv_data->pcie_dev = pcie_device;
1923 			pcie_device->starget = starget;
1924 			pcie_device->id = starget->id;
1925 			pcie_device->channel = starget->channel;
1926 			sas_target_priv_data->flags |=
1927 				MPT_TARGET_FLAGS_PCIE_DEVICE;
1928 			if (pcie_device->fast_path)
1929 				sas_target_priv_data->flags |=
1930 					MPT_TARGET_FASTPATH_IO;
1931 		}
1932 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1933 		return 0;
1934 	}
1935 
1936 	/* sas/sata devices */
1937 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1938 	rphy = dev_to_rphy(starget->dev.parent);
1939 	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
1940 
1941 	if (sas_device) {
1942 		sas_target_priv_data->handle = sas_device->handle;
1943 		sas_target_priv_data->sas_address = sas_device->sas_address;
1944 		sas_target_priv_data->port = sas_device->port;
1945 		sas_target_priv_data->sas_dev = sas_device;
1946 		sas_device->starget = starget;
1947 		sas_device->id = starget->id;
1948 		sas_device->channel = starget->channel;
1949 		if (test_bit(sas_device->handle, ioc->pd_handles))
1950 			sas_target_priv_data->flags |=
1951 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
1952 		if (sas_device->fast_path)
1953 			sas_target_priv_data->flags |=
1954 					MPT_TARGET_FASTPATH_IO;
1955 	}
1956 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1957 
1958 	return 0;
1959 }
1960 
1961 /**
1962  * scsih_target_destroy - target destroy routine
1963  * @starget: scsi target struct
1964  */
1965 static void
scsih_target_destroy(struct scsi_target * starget)1966 scsih_target_destroy(struct scsi_target *starget)
1967 {
1968 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1969 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1970 	struct MPT3SAS_TARGET *sas_target_priv_data;
1971 	struct _sas_device *sas_device;
1972 	struct _raid_device *raid_device;
1973 	struct _pcie_device *pcie_device;
1974 	unsigned long flags;
1975 
1976 	sas_target_priv_data = starget->hostdata;
1977 	if (!sas_target_priv_data)
1978 		return;
1979 
1980 	if (starget->channel == RAID_CHANNEL) {
1981 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1982 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1983 		    starget->channel);
1984 		if (raid_device) {
1985 			raid_device->starget = NULL;
1986 			raid_device->sdev = NULL;
1987 		}
1988 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1989 		goto out;
1990 	}
1991 
1992 	if (starget->channel == PCIE_CHANNEL) {
1993 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1994 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1995 							sas_target_priv_data);
1996 		if (pcie_device && (pcie_device->starget == starget) &&
1997 			(pcie_device->id == starget->id) &&
1998 			(pcie_device->channel == starget->channel))
1999 			pcie_device->starget = NULL;
2000 
2001 		if (pcie_device) {
2002 			/*
2003 			 * Corresponding get() is in _scsih_target_alloc()
2004 			 */
2005 			sas_target_priv_data->pcie_dev = NULL;
2006 			pcie_device_put(pcie_device);
2007 			pcie_device_put(pcie_device);
2008 		}
2009 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2010 		goto out;
2011 	}
2012 
2013 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2014 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
2015 	if (sas_device && (sas_device->starget == starget) &&
2016 	    (sas_device->id == starget->id) &&
2017 	    (sas_device->channel == starget->channel))
2018 		sas_device->starget = NULL;
2019 
2020 	if (sas_device) {
2021 		/*
2022 		 * Corresponding get() is in _scsih_target_alloc()
2023 		 */
2024 		sas_target_priv_data->sas_dev = NULL;
2025 		sas_device_put(sas_device);
2026 
2027 		sas_device_put(sas_device);
2028 	}
2029 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2030 
2031  out:
2032 	kfree(sas_target_priv_data);
2033 	starget->hostdata = NULL;
2034 }
2035 
2036 /**
2037  * scsih_slave_alloc - device add routine
2038  * @sdev: scsi device struct
2039  *
2040  * Return: 0 if ok. Any other return is assumed to be an error and
2041  * the device is ignored.
2042  */
2043 static int
scsih_slave_alloc(struct scsi_device * sdev)2044 scsih_slave_alloc(struct scsi_device *sdev)
2045 {
2046 	struct Scsi_Host *shost;
2047 	struct MPT3SAS_ADAPTER *ioc;
2048 	struct MPT3SAS_TARGET *sas_target_priv_data;
2049 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2050 	struct scsi_target *starget;
2051 	struct _raid_device *raid_device;
2052 	struct _sas_device *sas_device;
2053 	struct _pcie_device *pcie_device;
2054 	unsigned long flags;
2055 
2056 	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2057 				       GFP_KERNEL);
2058 	if (!sas_device_priv_data)
2059 		return -ENOMEM;
2060 
2061 	sas_device_priv_data->lun = sdev->lun;
2062 	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2063 
2064 	starget = scsi_target(sdev);
2065 	sas_target_priv_data = starget->hostdata;
2066 	sas_target_priv_data->num_luns++;
2067 	sas_device_priv_data->sas_target = sas_target_priv_data;
2068 	sdev->hostdata = sas_device_priv_data;
2069 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2070 		sdev->no_uld_attach = 1;
2071 
2072 	shost = dev_to_shost(&starget->dev);
2073 	ioc = shost_priv(shost);
2074 	if (starget->channel == RAID_CHANNEL) {
2075 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2076 		raid_device = _scsih_raid_device_find_by_id(ioc,
2077 		    starget->id, starget->channel);
2078 		if (raid_device)
2079 			raid_device->sdev = sdev; /* raid is single lun */
2080 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2081 	}
2082 	if (starget->channel == PCIE_CHANNEL) {
2083 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2084 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2085 				sas_target_priv_data->sas_address);
2086 		if (pcie_device && (pcie_device->starget == NULL)) {
2087 			sdev_printk(KERN_INFO, sdev,
2088 			    "%s : pcie_device->starget set to starget @ %d\n",
2089 			    __func__, __LINE__);
2090 			pcie_device->starget = starget;
2091 		}
2092 
2093 		if (pcie_device)
2094 			pcie_device_put(pcie_device);
2095 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2096 
2097 	} else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2098 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
2099 		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2100 		    sas_target_priv_data->sas_address,
2101 		    sas_target_priv_data->port);
2102 		if (sas_device && (sas_device->starget == NULL)) {
2103 			sdev_printk(KERN_INFO, sdev,
2104 			"%s : sas_device->starget set to starget @ %d\n",
2105 			     __func__, __LINE__);
2106 			sas_device->starget = starget;
2107 		}
2108 
2109 		if (sas_device)
2110 			sas_device_put(sas_device);
2111 
2112 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2113 	}
2114 
2115 	return 0;
2116 }
2117 
2118 /**
2119  * scsih_slave_destroy - device destroy routine
2120  * @sdev: scsi device struct
2121  */
2122 static void
scsih_slave_destroy(struct scsi_device * sdev)2123 scsih_slave_destroy(struct scsi_device *sdev)
2124 {
2125 	struct MPT3SAS_TARGET *sas_target_priv_data;
2126 	struct scsi_target *starget;
2127 	struct Scsi_Host *shost;
2128 	struct MPT3SAS_ADAPTER *ioc;
2129 	struct _sas_device *sas_device;
2130 	struct _pcie_device *pcie_device;
2131 	unsigned long flags;
2132 
2133 	if (!sdev->hostdata)
2134 		return;
2135 
2136 	starget = scsi_target(sdev);
2137 	sas_target_priv_data = starget->hostdata;
2138 	sas_target_priv_data->num_luns--;
2139 
2140 	shost = dev_to_shost(&starget->dev);
2141 	ioc = shost_priv(shost);
2142 
2143 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2144 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2145 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2146 				sas_target_priv_data);
2147 		if (pcie_device && !sas_target_priv_data->num_luns)
2148 			pcie_device->starget = NULL;
2149 
2150 		if (pcie_device)
2151 			pcie_device_put(pcie_device);
2152 
2153 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2154 
2155 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2156 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
2157 		sas_device = __mpt3sas_get_sdev_from_target(ioc,
2158 				sas_target_priv_data);
2159 		if (sas_device && !sas_target_priv_data->num_luns)
2160 			sas_device->starget = NULL;
2161 
2162 		if (sas_device)
2163 			sas_device_put(sas_device);
2164 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2165 	}
2166 
2167 	kfree(sdev->hostdata);
2168 	sdev->hostdata = NULL;
2169 }
2170 
2171 /**
2172  * _scsih_display_sata_capabilities - sata capabilities
2173  * @ioc: per adapter object
2174  * @handle: device handle
2175  * @sdev: scsi device struct
2176  */
2177 static void
_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER * ioc,u16 handle,struct scsi_device * sdev)2178 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2179 	u16 handle, struct scsi_device *sdev)
2180 {
2181 	Mpi2ConfigReply_t mpi_reply;
2182 	Mpi2SasDevicePage0_t sas_device_pg0;
2183 	u32 ioc_status;
2184 	u16 flags;
2185 	u32 device_info;
2186 
2187 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2188 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2189 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2190 			__FILE__, __LINE__, __func__);
2191 		return;
2192 	}
2193 
2194 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2195 	    MPI2_IOCSTATUS_MASK;
2196 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2197 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2198 			__FILE__, __LINE__, __func__);
2199 		return;
2200 	}
2201 
2202 	flags = le16_to_cpu(sas_device_pg0.Flags);
2203 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2204 
2205 	sdev_printk(KERN_INFO, sdev,
2206 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2207 	    "sw_preserve(%s)\n",
2208 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2209 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2210 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2211 	    "n",
2212 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2213 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2214 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2215 }
2216 
2217 /*
2218  * raid transport support -
2219  * Enabled for SLES11 and newer, in older kernels the driver will panic when
2220  * unloading the driver followed by a load - I believe that the subroutine
2221  * raid_class_release() is not cleaning up properly.
2222  */
2223 
2224 /**
2225  * scsih_is_raid - return boolean indicating device is raid volume
2226  * @dev: the device struct object
2227  */
2228 static int
scsih_is_raid(struct device * dev)2229 scsih_is_raid(struct device *dev)
2230 {
2231 	struct scsi_device *sdev = to_scsi_device(dev);
2232 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2233 
2234 	if (ioc->is_warpdrive)
2235 		return 0;
2236 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2237 }
2238 
2239 static int
scsih_is_nvme(struct device * dev)2240 scsih_is_nvme(struct device *dev)
2241 {
2242 	struct scsi_device *sdev = to_scsi_device(dev);
2243 
2244 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2245 }
2246 
2247 /**
2248  * scsih_get_resync - get raid volume resync percent complete
2249  * @dev: the device struct object
2250  */
2251 static void
scsih_get_resync(struct device * dev)2252 scsih_get_resync(struct device *dev)
2253 {
2254 	struct scsi_device *sdev = to_scsi_device(dev);
2255 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2256 	static struct _raid_device *raid_device;
2257 	unsigned long flags;
2258 	Mpi2RaidVolPage0_t vol_pg0;
2259 	Mpi2ConfigReply_t mpi_reply;
2260 	u32 volume_status_flags;
2261 	u8 percent_complete;
2262 	u16 handle;
2263 
2264 	percent_complete = 0;
2265 	handle = 0;
2266 	if (ioc->is_warpdrive)
2267 		goto out;
2268 
2269 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2270 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2271 	    sdev->channel);
2272 	if (raid_device) {
2273 		handle = raid_device->handle;
2274 		percent_complete = raid_device->percent_complete;
2275 	}
2276 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2277 
2278 	if (!handle)
2279 		goto out;
2280 
2281 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2282 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2283 	     sizeof(Mpi2RaidVolPage0_t))) {
2284 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2285 			__FILE__, __LINE__, __func__);
2286 		percent_complete = 0;
2287 		goto out;
2288 	}
2289 
2290 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2291 	if (!(volume_status_flags &
2292 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2293 		percent_complete = 0;
2294 
2295  out:
2296 
2297 	switch (ioc->hba_mpi_version_belonged) {
2298 	case MPI2_VERSION:
2299 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2300 		break;
2301 	case MPI25_VERSION:
2302 	case MPI26_VERSION:
2303 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2304 		break;
2305 	}
2306 }
2307 
2308 /**
2309  * scsih_get_state - get raid volume level
2310  * @dev: the device struct object
2311  */
2312 static void
scsih_get_state(struct device * dev)2313 scsih_get_state(struct device *dev)
2314 {
2315 	struct scsi_device *sdev = to_scsi_device(dev);
2316 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2317 	static struct _raid_device *raid_device;
2318 	unsigned long flags;
2319 	Mpi2RaidVolPage0_t vol_pg0;
2320 	Mpi2ConfigReply_t mpi_reply;
2321 	u32 volstate;
2322 	enum raid_state state = RAID_STATE_UNKNOWN;
2323 	u16 handle = 0;
2324 
2325 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2326 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2327 	    sdev->channel);
2328 	if (raid_device)
2329 		handle = raid_device->handle;
2330 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2331 
2332 	if (!raid_device)
2333 		goto out;
2334 
2335 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2336 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2337 	     sizeof(Mpi2RaidVolPage0_t))) {
2338 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2339 			__FILE__, __LINE__, __func__);
2340 		goto out;
2341 	}
2342 
2343 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2344 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2345 		state = RAID_STATE_RESYNCING;
2346 		goto out;
2347 	}
2348 
2349 	switch (vol_pg0.VolumeState) {
2350 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2351 	case MPI2_RAID_VOL_STATE_ONLINE:
2352 		state = RAID_STATE_ACTIVE;
2353 		break;
2354 	case  MPI2_RAID_VOL_STATE_DEGRADED:
2355 		state = RAID_STATE_DEGRADED;
2356 		break;
2357 	case MPI2_RAID_VOL_STATE_FAILED:
2358 	case MPI2_RAID_VOL_STATE_MISSING:
2359 		state = RAID_STATE_OFFLINE;
2360 		break;
2361 	}
2362  out:
2363 	switch (ioc->hba_mpi_version_belonged) {
2364 	case MPI2_VERSION:
2365 		raid_set_state(mpt2sas_raid_template, dev, state);
2366 		break;
2367 	case MPI25_VERSION:
2368 	case MPI26_VERSION:
2369 		raid_set_state(mpt3sas_raid_template, dev, state);
2370 		break;
2371 	}
2372 }
2373 
2374 /**
2375  * _scsih_set_level - set raid level
2376  * @ioc: ?
2377  * @sdev: scsi device struct
2378  * @volume_type: volume type
2379  */
2380 static void
_scsih_set_level(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev,u8 volume_type)2381 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2382 	struct scsi_device *sdev, u8 volume_type)
2383 {
2384 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2385 
2386 	switch (volume_type) {
2387 	case MPI2_RAID_VOL_TYPE_RAID0:
2388 		level = RAID_LEVEL_0;
2389 		break;
2390 	case MPI2_RAID_VOL_TYPE_RAID10:
2391 		level = RAID_LEVEL_10;
2392 		break;
2393 	case MPI2_RAID_VOL_TYPE_RAID1E:
2394 		level = RAID_LEVEL_1E;
2395 		break;
2396 	case MPI2_RAID_VOL_TYPE_RAID1:
2397 		level = RAID_LEVEL_1;
2398 		break;
2399 	}
2400 
2401 	switch (ioc->hba_mpi_version_belonged) {
2402 	case MPI2_VERSION:
2403 		raid_set_level(mpt2sas_raid_template,
2404 			&sdev->sdev_gendev, level);
2405 		break;
2406 	case MPI25_VERSION:
2407 	case MPI26_VERSION:
2408 		raid_set_level(mpt3sas_raid_template,
2409 			&sdev->sdev_gendev, level);
2410 		break;
2411 	}
2412 }
2413 
2414 
2415 /**
2416  * _scsih_get_volume_capabilities - volume capabilities
2417  * @ioc: per adapter object
2418  * @raid_device: the raid_device object
2419  *
2420  * Return: 0 for success, else 1
2421  */
2422 static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)2423 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2424 	struct _raid_device *raid_device)
2425 {
2426 	Mpi2RaidVolPage0_t *vol_pg0;
2427 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2428 	Mpi2SasDevicePage0_t sas_device_pg0;
2429 	Mpi2ConfigReply_t mpi_reply;
2430 	u16 sz;
2431 	u8 num_pds;
2432 
2433 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2434 	    &num_pds)) || !num_pds) {
2435 		dfailprintk(ioc,
2436 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2437 				     __FILE__, __LINE__, __func__));
2438 		return 1;
2439 	}
2440 
2441 	raid_device->num_pds = num_pds;
2442 	sz = struct_size(vol_pg0, PhysDisk, num_pds);
2443 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2444 	if (!vol_pg0) {
2445 		dfailprintk(ioc,
2446 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2447 				     __FILE__, __LINE__, __func__));
2448 		return 1;
2449 	}
2450 
2451 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2452 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2453 		dfailprintk(ioc,
2454 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2455 				     __FILE__, __LINE__, __func__));
2456 		kfree(vol_pg0);
2457 		return 1;
2458 	}
2459 
2460 	raid_device->volume_type = vol_pg0->VolumeType;
2461 
2462 	/* figure out what the underlying devices are by
2463 	 * obtaining the device_info bits for the 1st device
2464 	 */
2465 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2466 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2467 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2468 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2469 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2470 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2471 			raid_device->device_info =
2472 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2473 		}
2474 	}
2475 
2476 	kfree(vol_pg0);
2477 	return 0;
2478 }
2479 
2480 /**
2481  * _scsih_enable_tlr - setting TLR flags
2482  * @ioc: per adapter object
2483  * @sdev: scsi device struct
2484  *
2485  * Enabling Transaction Layer Retries for tape devices when
2486  * vpd page 0x90 is present
2487  *
2488  */
2489 static void
_scsih_enable_tlr(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev)2490 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2491 {
2492 
2493 	/* only for TAPE */
2494 	if (sdev->type != TYPE_TAPE)
2495 		return;
2496 
2497 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2498 		return;
2499 
2500 	sas_enable_tlr(sdev);
2501 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2502 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2503 	return;
2504 
2505 }
2506 
2507 /**
2508  * scsih_device_configure - device configure routine.
2509  * @sdev: scsi device struct
2510  * @lim: queue limits
2511  *
2512  * Return: 0 if ok. Any other return is assumed to be an error and
2513  * the device is ignored.
2514  */
2515 static int
scsih_device_configure(struct scsi_device * sdev,struct queue_limits * lim)2516 scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim)
2517 {
2518 	struct Scsi_Host *shost = sdev->host;
2519 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2520 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2521 	struct MPT3SAS_TARGET *sas_target_priv_data;
2522 	struct _sas_device *sas_device;
2523 	struct _pcie_device *pcie_device;
2524 	struct _raid_device *raid_device;
2525 	unsigned long flags;
2526 	int qdepth;
2527 	u8 ssp_target = 0;
2528 	char *ds = "";
2529 	char *r_level = "";
2530 	u16 handle, volume_handle = 0;
2531 	u64 volume_wwid = 0;
2532 
2533 	qdepth = 1;
2534 	sas_device_priv_data = sdev->hostdata;
2535 	sas_device_priv_data->configured_lun = 1;
2536 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2537 	sas_target_priv_data = sas_device_priv_data->sas_target;
2538 	handle = sas_target_priv_data->handle;
2539 
2540 	/* raid volume handling */
2541 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2542 
2543 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2544 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2545 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2546 		if (!raid_device) {
2547 			dfailprintk(ioc,
2548 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2549 					     __FILE__, __LINE__, __func__));
2550 			return 1;
2551 		}
2552 
2553 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2554 			dfailprintk(ioc,
2555 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2556 					     __FILE__, __LINE__, __func__));
2557 			return 1;
2558 		}
2559 
2560 		/*
2561 		 * WARPDRIVE: Initialize the required data for Direct IO
2562 		 */
2563 		mpt3sas_init_warpdrive_properties(ioc, raid_device);
2564 
2565 		/* RAID Queue Depth Support
2566 		 * IS volume = underlying qdepth of drive type, either
2567 		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2568 		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2569 		 */
2570 		if (raid_device->device_info &
2571 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2572 			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2573 			ds = "SSP";
2574 		} else {
2575 			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2576 			if (raid_device->device_info &
2577 			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2578 				ds = "SATA";
2579 			else
2580 				ds = "STP";
2581 		}
2582 
2583 		switch (raid_device->volume_type) {
2584 		case MPI2_RAID_VOL_TYPE_RAID0:
2585 			r_level = "RAID0";
2586 			break;
2587 		case MPI2_RAID_VOL_TYPE_RAID1E:
2588 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2589 			if (ioc->manu_pg10.OEMIdentifier &&
2590 			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2591 			    MFG10_GF0_R10_DISPLAY) &&
2592 			    !(raid_device->num_pds % 2))
2593 				r_level = "RAID10";
2594 			else
2595 				r_level = "RAID1E";
2596 			break;
2597 		case MPI2_RAID_VOL_TYPE_RAID1:
2598 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2599 			r_level = "RAID1";
2600 			break;
2601 		case MPI2_RAID_VOL_TYPE_RAID10:
2602 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2603 			r_level = "RAID10";
2604 			break;
2605 		case MPI2_RAID_VOL_TYPE_UNKNOWN:
2606 		default:
2607 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2608 			r_level = "RAIDX";
2609 			break;
2610 		}
2611 
2612 		if (!ioc->hide_ir_msg)
2613 			sdev_printk(KERN_INFO, sdev,
2614 			   "%s: handle(0x%04x), wwid(0x%016llx),"
2615 			    " pd_count(%d), type(%s)\n",
2616 			    r_level, raid_device->handle,
2617 			    (unsigned long long)raid_device->wwid,
2618 			    raid_device->num_pds, ds);
2619 
2620 		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2621 			lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS;
2622 			sdev_printk(KERN_INFO, sdev,
2623 					"Set queue's max_sector to: %u\n",
2624 						MPT3SAS_RAID_MAX_SECTORS);
2625 		}
2626 
2627 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2628 
2629 		/* raid transport support */
2630 		if (!ioc->is_warpdrive)
2631 			_scsih_set_level(ioc, sdev, raid_device->volume_type);
2632 		return 0;
2633 	}
2634 
2635 	/* non-raid handling */
2636 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2637 		if (mpt3sas_config_get_volume_handle(ioc, handle,
2638 		    &volume_handle)) {
2639 			dfailprintk(ioc,
2640 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2641 					     __FILE__, __LINE__, __func__));
2642 			return 1;
2643 		}
2644 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2645 		    volume_handle, &volume_wwid)) {
2646 			dfailprintk(ioc,
2647 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2648 					     __FILE__, __LINE__, __func__));
2649 			return 1;
2650 		}
2651 	}
2652 
2653 	/* PCIe handling */
2654 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2655 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2656 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2657 				sas_device_priv_data->sas_target->sas_address);
2658 		if (!pcie_device) {
2659 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2660 			dfailprintk(ioc,
2661 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2662 					     __FILE__, __LINE__, __func__));
2663 			return 1;
2664 		}
2665 
2666 		qdepth = ioc->max_nvme_qd;
2667 		ds = "NVMe";
2668 		sdev_printk(KERN_INFO, sdev,
2669 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2670 			ds, handle, (unsigned long long)pcie_device->wwid,
2671 			pcie_device->port_num);
2672 		if (pcie_device->enclosure_handle != 0)
2673 			sdev_printk(KERN_INFO, sdev,
2674 			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
2675 			ds,
2676 			(unsigned long long)pcie_device->enclosure_logical_id,
2677 			pcie_device->slot);
2678 		if (pcie_device->connector_name[0] != '\0')
2679 			sdev_printk(KERN_INFO, sdev,
2680 				"%s: enclosure level(0x%04x),"
2681 				"connector name( %s)\n", ds,
2682 				pcie_device->enclosure_level,
2683 				pcie_device->connector_name);
2684 
2685 		if (pcie_device->nvme_mdts)
2686 			lim->max_hw_sectors = pcie_device->nvme_mdts / 512;
2687 
2688 		pcie_device_put(pcie_device);
2689 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2690 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2691 		lim->virt_boundary_mask = ioc->page_size - 1;
2692 		return 0;
2693 	}
2694 
2695 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2696 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2697 	   sas_device_priv_data->sas_target->sas_address,
2698 	   sas_device_priv_data->sas_target->port);
2699 	if (!sas_device) {
2700 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2701 		dfailprintk(ioc,
2702 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2703 				     __FILE__, __LINE__, __func__));
2704 		return 1;
2705 	}
2706 
2707 	sas_device->volume_handle = volume_handle;
2708 	sas_device->volume_wwid = volume_wwid;
2709 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2710 		qdepth = (sas_device->port_type > 1) ?
2711 			ioc->max_wideport_qd : ioc->max_narrowport_qd;
2712 		ssp_target = 1;
2713 		if (sas_device->device_info &
2714 				MPI2_SAS_DEVICE_INFO_SEP) {
2715 			sdev_printk(KERN_WARNING, sdev,
2716 			"set ignore_delay_remove for handle(0x%04x)\n",
2717 			sas_device_priv_data->sas_target->handle);
2718 			sas_device_priv_data->ignore_delay_remove = 1;
2719 			ds = "SES";
2720 		} else
2721 			ds = "SSP";
2722 	} else {
2723 		qdepth = ioc->max_sata_qd;
2724 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2725 			ds = "STP";
2726 		else if (sas_device->device_info &
2727 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2728 			ds = "SATA";
2729 	}
2730 
2731 	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2732 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2733 	    ds, handle, (unsigned long long)sas_device->sas_address,
2734 	    sas_device->phy, (unsigned long long)sas_device->device_name);
2735 
2736 	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2737 
2738 	sas_device_put(sas_device);
2739 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2740 
2741 	if (!ssp_target)
2742 		_scsih_display_sata_capabilities(ioc, handle, sdev);
2743 
2744 
2745 	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2746 
2747 	if (ssp_target) {
2748 		sas_read_port_mode_page(sdev);
2749 		_scsih_enable_tlr(ioc, sdev);
2750 	}
2751 
2752 	return 0;
2753 }
2754 
2755 /**
2756  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2757  * @sdev: scsi device struct
2758  * @bdev: pointer to block device context
2759  * @capacity: device size (in 512 byte sectors)
2760  * @params: three element array to place output:
2761  *              params[0] number of heads (max 255)
2762  *              params[1] number of sectors (max 63)
2763  *              params[2] number of cylinders
2764  */
2765 static int
scsih_bios_param(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int params[])2766 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2767 	sector_t capacity, int params[])
2768 {
2769 	int		heads;
2770 	int		sectors;
2771 	sector_t	cylinders;
2772 	ulong		dummy;
2773 
2774 	heads = 64;
2775 	sectors = 32;
2776 
2777 	dummy = heads * sectors;
2778 	cylinders = capacity;
2779 	sector_div(cylinders, dummy);
2780 
2781 	/*
2782 	 * Handle extended translation size for logical drives
2783 	 * > 1Gb
2784 	 */
2785 	if ((ulong)capacity >= 0x200000) {
2786 		heads = 255;
2787 		sectors = 63;
2788 		dummy = heads * sectors;
2789 		cylinders = capacity;
2790 		sector_div(cylinders, dummy);
2791 	}
2792 
2793 	/* return result */
2794 	params[0] = heads;
2795 	params[1] = sectors;
2796 	params[2] = cylinders;
2797 
2798 	return 0;
2799 }
2800 
2801 /**
2802  * _scsih_response_code - translation of device response code
2803  * @ioc: per adapter object
2804  * @response_code: response code returned by the device
2805  */
2806 static void
_scsih_response_code(struct MPT3SAS_ADAPTER * ioc,u8 response_code)2807 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2808 {
2809 	char *desc;
2810 
2811 	switch (response_code) {
2812 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2813 		desc = "task management request completed";
2814 		break;
2815 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2816 		desc = "invalid frame";
2817 		break;
2818 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2819 		desc = "task management request not supported";
2820 		break;
2821 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2822 		desc = "task management request failed";
2823 		break;
2824 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2825 		desc = "task management request succeeded";
2826 		break;
2827 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2828 		desc = "invalid lun";
2829 		break;
2830 	case 0xA:
2831 		desc = "overlapped tag attempted";
2832 		break;
2833 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2834 		desc = "task queued, however not sent to target";
2835 		break;
2836 	default:
2837 		desc = "unknown";
2838 		break;
2839 	}
2840 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2841 }
2842 
2843 /**
2844  * _scsih_tm_done - tm completion routine
2845  * @ioc: per adapter object
2846  * @smid: system request message index
2847  * @msix_index: MSIX table index supplied by the OS
2848  * @reply: reply message frame(lower 32bit addr)
2849  * Context: none.
2850  *
2851  * The callback handler when using scsih_issue_tm.
2852  *
2853  * Return: 1 meaning mf should be freed from _base_interrupt
2854  *         0 means the mf is freed from this function.
2855  */
2856 static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)2857 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2858 {
2859 	MPI2DefaultReply_t *mpi_reply;
2860 
2861 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2862 		return 1;
2863 	if (ioc->tm_cmds.smid != smid)
2864 		return 1;
2865 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2866 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2867 	if (mpi_reply) {
2868 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2869 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2870 	}
2871 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2872 	complete(&ioc->tm_cmds.done);
2873 	return 1;
2874 }
2875 
2876 /**
2877  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2878  * @ioc: per adapter object
2879  * @handle: device handle
2880  *
 * During task management request, we need to freeze the device queue.
2882  */
2883 void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)2884 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2885 {
2886 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2887 	struct scsi_device *sdev;
2888 	u8 skip = 0;
2889 
2890 	shost_for_each_device(sdev, ioc->shost) {
2891 		if (skip)
2892 			continue;
2893 		sas_device_priv_data = sdev->hostdata;
2894 		if (!sas_device_priv_data)
2895 			continue;
2896 		if (sas_device_priv_data->sas_target->handle == handle) {
2897 			sas_device_priv_data->sas_target->tm_busy = 1;
2898 			skip = 1;
2899 			ioc->ignore_loginfos = 1;
2900 		}
2901 	}
2902 }
2903 
2904 /**
2905  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2906  * @ioc: per adapter object
2907  * @handle: device handle
2908  *
 * During task management request, we need to freeze the device queue.
2910  */
2911 void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)2912 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2913 {
2914 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2915 	struct scsi_device *sdev;
2916 	u8 skip = 0;
2917 
2918 	shost_for_each_device(sdev, ioc->shost) {
2919 		if (skip)
2920 			continue;
2921 		sas_device_priv_data = sdev->hostdata;
2922 		if (!sas_device_priv_data)
2923 			continue;
2924 		if (sas_device_priv_data->sas_target->handle == handle) {
2925 			sas_device_priv_data->sas_target->tm_busy = 0;
2926 			skip = 1;
2927 			ioc->ignore_loginfos = 0;
2928 		}
2929 	}
2930 }
2931 
2932 /**
2933  * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2934  * @ioc: per adapter object
2935  * @channel: the channel assigned by the OS
2936  * @id: the id assigned by the OS
2937  * @lun: lun number
2938  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2939  * @smid_task: smid assigned to the task
2940  *
2941  * Look whether TM has aborted the timed out SCSI command, if
2942  * TM has aborted the IO then return SUCCESS else return FAILED.
2943  */
2944 static int
scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER * ioc,uint channel,uint id,uint lun,u8 type,u16 smid_task)2945 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2946 	uint id, uint lun, u8 type, u16 smid_task)
2947 {
2948 
2949 	if (smid_task <= ioc->shost->can_queue) {
2950 		switch (type) {
2951 		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2952 			if (!(_scsih_scsi_lookup_find_by_target(ioc,
2953 			    id, channel)))
2954 				return SUCCESS;
2955 			break;
2956 		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2957 		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2958 			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2959 			    lun, channel)))
2960 				return SUCCESS;
2961 			break;
2962 		default:
2963 			return SUCCESS;
2964 		}
2965 	} else if (smid_task == ioc->scsih_cmds.smid) {
2966 		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2967 		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2968 			return SUCCESS;
2969 	} else if (smid_task == ioc->ctl_cmds.smid) {
2970 		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2971 		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2972 			return SUCCESS;
2973 	}
2974 
2975 	return FAILED;
2976 }
2977 
2978 /**
2979  * scsih_tm_post_processing - post processing of target & LUN reset
2980  * @ioc: per adapter object
2981  * @handle: device handle
2982  * @channel: the channel assigned by the OS
2983  * @id: the id assigned by the OS
2984  * @lun: lun number
2985  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2986  * @smid_task: smid assigned to the task
2987  *
2988  * Post processing of target & LUN reset. Due to interrupt latency
2989  * issue it possible that interrupt for aborted IO might not be
2990  * received yet. So before returning failure status, poll the
2991  * reply descriptor pools for the reply of timed out SCSI command.
2992  * Return FAILED status if reply for timed out is not received
2993  * otherwise return SUCCESS.
2994  */
2995 static int
scsih_tm_post_processing(struct MPT3SAS_ADAPTER * ioc,u16 handle,uint channel,uint id,uint lun,u8 type,u16 smid_task)2996 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2997 	uint channel, uint id, uint lun, u8 type, u16 smid_task)
2998 {
2999 	int rc;
3000 
3001 	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3002 	if (rc == SUCCESS)
3003 		return rc;
3004 
3005 	ioc_info(ioc,
3006 	    "Poll ReplyDescriptor queues for completion of"
3007 	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3008 	    smid_task, type, handle);
3009 
3010 	/*
3011 	 * Due to interrupt latency issues, driver may receive interrupt for
3012 	 * TM first and then for aborted SCSI IO command. So, poll all the
3013 	 * ReplyDescriptor pools before returning the FAILED status to SML.
3014 	 */
3015 	mpt3sas_base_mask_interrupts(ioc);
3016 	mpt3sas_base_sync_reply_irqs(ioc, 1);
3017 	mpt3sas_base_unmask_interrupts(ioc);
3018 
3019 	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3020 }
3021 
3022 /**
3023  * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3024  * @ioc: per adapter struct
3025  * @handle: device handle
3026  * @channel: the channel assigned by the OS
3027  * @id: the id assigned by the OS
3028  * @lun: lun number
3029  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3030  * @smid_task: smid assigned to the task
3031  * @msix_task: MSIX table index supplied by the OS
3032  * @timeout: timeout in seconds
3033  * @tr_method: Target Reset Method
3034  * Context: user
3035  *
3036  * A generic API for sending task management requests to firmware.
3037  *
3038  * The callback index is set inside `ioc->tm_cb_idx`.
3039  * The caller is responsible to check for outstanding commands.
3040  *
3041  * Return: SUCCESS or FAILED.
3042  */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	/* caller must hold tm_cmds.mutex (see mpt3sas_scsih_issue_locked_tm) */
	lockdep_assert_held(&ioc->tm_cmds.mutex);

	/* only one TM command may be outstanding at a time */
	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/*
	 * If the IOC is in an unusable state (doorbell in use, faulted, or
	 * in coredump), a hard reset is the recovery action; the TM itself
	 * becomes moot because the reset flushes all outstanding IO.
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests go through the high-priority request pool */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	/* note: the smid logged here is the targeted task's smid, not the
	 * TM request's own smid */
	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	/* build the SCSI Task Management request frame */
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* tr_method is carried in MsgFlags only for abort/query task types */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	/* freeze the target's queue while the TM is in flight */
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		/* TM itself timed out; may set issue_reset via the macro */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
					FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	/* per-task-type determination of SUCCESS vs FAILED */
	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If DevHandle field in smid_task's entry of request pool
		 * doesn't match with device handle on which this task abort
		 * TM is received then it means that TM has successfully
		 * aborted the timed out command. Since smid_task's entry in
		 * request pool will be memset to zero once the timed out
		 * command is returned to the SML. If the command is not
		 * aborted then smid_task's entry won't be cleared and it
		 * will have same DevHandle value on which this task abort TM
		 * is received and driver will return the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		/* resets: verify the timed-out IO really completed */
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	/* always unfreeze the queue and release the TM slot */
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
3189 
mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER * ioc,u16 handle,uint channel,uint id,u64 lun,u8 type,u16 smid_task,u16 msix_task,u8 timeout,u8 tr_method)3190 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3191 		uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3192 		u16 msix_task, u8 timeout, u8 tr_method)
3193 {
3194 	int ret;
3195 
3196 	mutex_lock(&ioc->tm_cmds.mutex);
3197 	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3198 			smid_task, msix_task, timeout, tr_method);
3199 	mutex_unlock(&ioc->tm_cmds.mutex);
3200 
3201 	return ret;
3202 }
3203 
3204 /**
3205  * _scsih_tm_display_info - displays info about the device
3206  * @ioc: per adapter struct
3207  * @scmd: pointer to scsi command object
3208  *
3209  * Called by task management callback handlers.
3210  */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* WarpDrive controllers label RAID volumes differently in logs */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		/* RAID volume: identified by handle and wwid */
		starget_printk(KERN_INFO, starget,
			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
			device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* NVMe/PCIe device: lookup under the pcie_device lock and
		 * drop the reference taken by the lookup when done */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
				pcie_device->handle,
				(unsigned long long)pcie_device->wwid,
				pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
					"enclosure logical id(0x%016llx), slot(%d)\n",
					(unsigned long long)
					pcie_device->enclosure_logical_id,
					pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
					"enclosure level(0x%04x), connector name( %s)\n",
					pcie_device->enclosure_level,
					pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		/* plain SAS device (possibly a hidden RAID component) */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				/* also show the owning volume's identity */
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				   (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}
3285 
3286 /**
3287  * scsih_abort - eh threads main abort routine
3288  * @scmd: pointer to scsi command object
3289  *
3290  * Return: SUCCESS if command aborted else FAILED
3291  */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	/* if the device is gone, complete the command as unreachable */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	/* NVMe devices (non-SCSI PCIe) use a device-specific abort timeout */
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	/* drop the lookup reference taken by mpt3sas_get_pdev_by_handle() */
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3359 
3360 /**
3361  * scsih_dev_reset - eh threads main device reset routine
3362  * @scmd: pointer to scsi command object
3363  *
3364  * Return: SUCCESS if command aborted else FAILED
3365  */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16	handle;
	u8	tr_method = 0;
	u8	tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* if the device is gone, complete the command as unreachable */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices use their own reset timeout and protocol-level
	 * reset method; everything else uses a link reset */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
		tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	/* drop the lookup references taken above */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}
3440 
3441 /**
3442  * scsih_target_reset - eh threads main target reset routine
3443  * @scmd: pointer to scsi command object
3444  *
3445  * Return: SUCCESS if command aborted else FAILED
3446  */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16	handle;
	u8	tr_method = 0;
	u8	tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* if the target is gone, complete the command as unreachable */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices use their own reset timeout and protocol-level
	 * reset method; everything else uses a link reset */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, 0,
		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	/* drop the lookup references taken above */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3518 
3519 
3520 /**
3521  * scsih_host_reset - eh threads main host reset routine
3522  * @scmd: pointer to scsi command object
3523  *
3524  * Return: SUCCESS if command aborted else FAILED
3525  */
3526 static int
scsih_host_reset(struct scsi_cmnd * scmd)3527 scsih_host_reset(struct scsi_cmnd *scmd)
3528 {
3529 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3530 	int r, retval;
3531 
3532 	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3533 	scsi_print_command(scmd);
3534 
3535 	if (ioc->is_driver_loading || ioc->remove_host) {
3536 		ioc_info(ioc, "Blocking the host reset\n");
3537 		r = FAILED;
3538 		goto out;
3539 	}
3540 
3541 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3542 	r = (retval < 0) ? FAILED : SUCCESS;
3543 out:
3544 	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3545 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3546 
3547 	return r;
3548 }
3549 
3550 /**
3551  * _scsih_fw_event_add - insert and queue up fw_event
3552  * @ioc: per adapter object
3553  * @fw_event: object describing the event
3554  * Context: This function will acquire ioc->fw_event_lock.
3555  *
3556  * This adds the firmware event object into link list, then queues it up to
3557  * be processed from user context.
3558  */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	/* no event thread means nobody would ever process the event */
	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* first reference: held by fw_event_list membership */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	/* second reference: held by the queued work item */
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
3576 
3577 /**
3578  * _scsih_fw_event_del_from_list - delete fw_event from the list
3579  * @ioc: per adapter object
3580  * @fw_event: object describing the event
3581  * Context: This function will acquire ioc->fw_event_lock.
3582  *
3583  * If the fw_event is on the fw_event_list, remove it and do a put.
3584  */
3585 static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)3586 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3587 	*fw_event)
3588 {
3589 	unsigned long flags;
3590 
3591 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3592 	if (!list_empty(&fw_event->list)) {
3593 		list_del_init(&fw_event->list);
3594 		fw_event_work_put(fw_event);
3595 	}
3596 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3597 }
3598 
3599 
3600  /**
3601  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3602  * @ioc: per adapter object
3603  * @event_data: trigger event data
3604  */
3605 void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER * ioc,struct SL_WH_TRIGGERS_EVENT_DATA_T * event_data)3606 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3607 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3608 {
3609 	struct fw_event_work *fw_event;
3610 	u16 sz;
3611 
3612 	if (ioc->is_driver_loading)
3613 		return;
3614 	sz = sizeof(*event_data);
3615 	fw_event = alloc_fw_event_work(sz);
3616 	if (!fw_event)
3617 		return;
3618 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3619 	fw_event->ioc = ioc;
3620 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3621 	_scsih_fw_event_add(ioc, fw_event);
3622 	fw_event_work_put(fw_event);
3623 }
3624 
3625 /**
3626  * _scsih_error_recovery_delete_devices - remove devices not responding
3627  * @ioc: per adapter object
3628  */
3629 static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER * ioc)3630 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3631 {
3632 	struct fw_event_work *fw_event;
3633 
3634 	fw_event = alloc_fw_event_work(0);
3635 	if (!fw_event)
3636 		return;
3637 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3638 	fw_event->ioc = ioc;
3639 	_scsih_fw_event_add(ioc, fw_event);
3640 	fw_event_work_put(fw_event);
3641 }
3642 
3643 /**
3644  * mpt3sas_port_enable_complete - port enable completed (fake event)
3645  * @ioc: per adapter object
3646  */
3647 void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER * ioc)3648 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3649 {
3650 	struct fw_event_work *fw_event;
3651 
3652 	fw_event = alloc_fw_event_work(0);
3653 	if (!fw_event)
3654 		return;
3655 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3656 	fw_event->ioc = ioc;
3657 	_scsih_fw_event_add(ioc, fw_event);
3658 	fw_event_work_put(fw_event);
3659 }
3660 
/**
 * dequeue_next_fw_event - pop the head of the firmware event list
 * @ioc: per adapter object
 *
 * Return: the first queued &struct fw_event_work, or NULL if the list
 * is empty.  Takes ioc->fw_event_lock.
 *
 * NOTE(review): the put below drops the reference held on behalf of
 * the list; the caller appears to rely on the reference still held by
 * the queued work item itself — confirm against _scsih_fw_event_add().
 */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct fw_event_work *fw_event = NULL;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&ioc->fw_event_list)) {
		fw_event = list_first_entry(&ioc->fw_event_list,
				struct fw_event_work, list);
		list_del_init(&fw_event->list);
		/* release the list's reference on the dequeued entry */
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	return fw_event;
}
3677 
3678 /**
3679  * _scsih_fw_event_cleanup_queue - cleanup event queue
3680  * @ioc: per adapter object
3681  *
3682  * Walk the firmware event queue, either killing timers, or waiting
3683  * for outstanding events to complete
3684  *
3685  * Context: task, can sleep
3686  */
3687 static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER * ioc)3688 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3689 {
3690 	struct fw_event_work *fw_event;
3691 
3692 	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3693 	    !ioc->firmware_event_thread)
3694 		return;
3695 	/*
3696 	 * Set current running event as ignore, so that
3697 	 * current running event will exit quickly.
3698 	 * As diag reset has occurred it is of no use
3699 	 * to process remaining stale event data entries.
3700 	 */
3701 	if (ioc->shost_recovery && ioc->current_event)
3702 		ioc->current_event->ignore = 1;
3703 
3704 	ioc->fw_events_cleanup = 1;
3705 	while ((fw_event = dequeue_next_fw_event(ioc)) ||
3706 	     (fw_event = ioc->current_event)) {
3707 
3708 		/*
3709 		 * Don't call cancel_work_sync() for current_event
3710 		 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3711 		 * otherwise we may observe deadlock if current
3712 		 * hard reset issued as part of processing the current_event.
3713 		 *
3714 		 * Orginal logic of cleaning the current_event is added
3715 		 * for handling the back to back host reset issued by the user.
3716 		 * i.e. during back to back host reset, driver use to process
3717 		 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
3718 		 * event back to back and this made the drives to unregister
3719 		 * the devices from SML.
3720 		 */
3721 
3722 		if (fw_event == ioc->current_event &&
3723 		    ioc->current_event->event !=
3724 		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
3725 			ioc->current_event = NULL;
3726 			continue;
3727 		}
3728 
3729 		/*
3730 		 * Driver has to clear ioc->start_scan flag when
3731 		 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
3732 		 * otherwise scsi_scan_host() API waits for the
3733 		 * 5 minute timer to expire. If we exit from
3734 		 * scsi_scan_host() early then we can issue the
3735 		 * new port enable request as part of current diag reset.
3736 		 */
3737 		if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
3738 			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
3739 			ioc->start_scan = 0;
3740 		}
3741 
3742 		/*
3743 		 * Wait on the fw_event to complete. If this returns 1, then
3744 		 * the event was never executed, and we need a put for the
3745 		 * reference the work had on the fw_event.
3746 		 *
3747 		 * If it did execute, we wait for it to finish, and the put will
3748 		 * happen from _firmware_event_work()
3749 		 */
3750 		if (cancel_work_sync(&fw_event->work))
3751 			fw_event_work_put(fw_event);
3752 
3753 	}
3754 	ioc->fw_events_cleanup = 0;
3755 }
3756 
3757 /**
3758  * _scsih_internal_device_block - block the sdev device
3759  * @sdev: per device object
3760  * @sas_device_priv_data : per device driver private data
3761  *
3762  * make sure device is blocked without error, if not
3763  * print an error
3764  */
3765 static void
_scsih_internal_device_block(struct scsi_device * sdev,struct MPT3SAS_DEVICE * sas_device_priv_data)3766 _scsih_internal_device_block(struct scsi_device *sdev,
3767 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3768 {
3769 	int r = 0;
3770 
3771 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3772 	    sas_device_priv_data->sas_target->handle);
3773 	sas_device_priv_data->block = 1;
3774 
3775 	r = scsi_internal_device_block_nowait(sdev);
3776 	if (r == -EINVAL)
3777 		sdev_printk(KERN_WARNING, sdev,
3778 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3779 		    r, sas_device_priv_data->sas_target->handle);
3780 }
3781 
3782 /**
3783  * _scsih_internal_device_unblock - unblock the sdev device
3784  * @sdev: per device object
3785  * @sas_device_priv_data : per device driver private data
3786  * make sure device is unblocked without error, if not retry
3787  * by blocking and then unblocking
3788  */
3789 
3790 static void
_scsih_internal_device_unblock(struct scsi_device * sdev,struct MPT3SAS_DEVICE * sas_device_priv_data)3791 _scsih_internal_device_unblock(struct scsi_device *sdev,
3792 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3793 {
3794 	int r = 0;
3795 
3796 	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3797 	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3798 	sas_device_priv_data->block = 0;
3799 	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3800 	if (r == -EINVAL) {
3801 		/* The device has been set to SDEV_RUNNING by SD layer during
3802 		 * device addition but the request queue is still stopped by
3803 		 * our earlier block call. We need to perform a block again
3804 		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3805 
3806 		sdev_printk(KERN_WARNING, sdev,
3807 		    "device_unblock failed with return(%d) for handle(0x%04x) "
3808 		    "performing a block followed by an unblock\n",
3809 		    r, sas_device_priv_data->sas_target->handle);
3810 		sas_device_priv_data->block = 1;
3811 		r = scsi_internal_device_block_nowait(sdev);
3812 		if (r)
3813 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
3814 			    "failed with return(%d) for handle(0x%04x)\n",
3815 			    r, sas_device_priv_data->sas_target->handle);
3816 
3817 		sas_device_priv_data->block = 0;
3818 		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3819 		if (r)
3820 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3821 			    " failed with return(%d) for handle(0x%04x)\n",
3822 			    r, sas_device_priv_data->sas_target->handle);
3823 	}
3824 }
3825 
3826 /**
3827  * _scsih_ublock_io_all_device - unblock every device
3828  * @ioc: per adapter object
3829  *
3830  * change the device state from block to running
3831  */
3832 static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER * ioc)3833 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3834 {
3835 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3836 	struct scsi_device *sdev;
3837 
3838 	shost_for_each_device(sdev, ioc->shost) {
3839 		sas_device_priv_data = sdev->hostdata;
3840 		if (!sas_device_priv_data)
3841 			continue;
3842 		if (!sas_device_priv_data->block)
3843 			continue;
3844 
3845 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3846 			"device_running, handle(0x%04x)\n",
3847 		    sas_device_priv_data->sas_target->handle));
3848 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3849 	}
3850 }
3851 
3852 
3853 /**
3854  * _scsih_ublock_io_device - prepare device to be deleted
3855  * @ioc: per adapter object
3856  * @sas_address: sas address
3857  * @port: hba port entry
3858  *
3859  * unblock then put device in offline state
3860  */
3861 static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)3862 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3863 	u64 sas_address, struct hba_port *port)
3864 {
3865 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3866 	struct scsi_device *sdev;
3867 
3868 	shost_for_each_device(sdev, ioc->shost) {
3869 		sas_device_priv_data = sdev->hostdata;
3870 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3871 			continue;
3872 		if (sas_device_priv_data->sas_target->sas_address
3873 		    != sas_address)
3874 			continue;
3875 		if (sas_device_priv_data->sas_target->port != port)
3876 			continue;
3877 		if (sas_device_priv_data->block)
3878 			_scsih_internal_device_unblock(sdev,
3879 				sas_device_priv_data);
3880 	}
3881 }
3882 
3883 /**
3884  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3885  * @ioc: per adapter object
3886  *
3887  * During device pull we need to appropriately set the sdev state.
3888  */
3889 static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER * ioc)3890 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3891 {
3892 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3893 	struct scsi_device *sdev;
3894 
3895 	shost_for_each_device(sdev, ioc->shost) {
3896 		sas_device_priv_data = sdev->hostdata;
3897 		if (!sas_device_priv_data)
3898 			continue;
3899 		if (sas_device_priv_data->block)
3900 			continue;
3901 		if (sas_device_priv_data->ignore_delay_remove) {
3902 			sdev_printk(KERN_INFO, sdev,
3903 			"%s skip device_block for SES handle(0x%04x)\n",
3904 			__func__, sas_device_priv_data->sas_target->handle);
3905 			continue;
3906 		}
3907 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3908 	}
3909 }

/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	struct _sas_device *sas_device;

	/* takes a reference on sas_device (may be NULL); dropped below */
	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle != handle)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* don't block while the transport rphy add is pending */
		if (sas_device && sas_device->pend_sas_rphy_add)
			continue;
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			"%s skip device_block for SES handle(0x%04x)\n",
			__func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}

	if (sas_device)
		sas_device_put(sas_device);
}
3949 
3950 /**
3951  * _scsih_block_io_to_children_attached_to_ex
3952  * @ioc: per adapter object
3953  * @sas_expander: the sas_device object
3954  *
3955  * This routine set sdev state to SDEV_BLOCK for all devices
3956  * attached to this expander. This function called when expander is
3957  * pulled.
3958  */
3959 static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)3960 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3961 	struct _sas_node *sas_expander)
3962 {
3963 	struct _sas_port *mpt3sas_port;
3964 	struct _sas_device *sas_device;
3965 	struct _sas_node *expander_sibling;
3966 	unsigned long flags;
3967 
3968 	if (!sas_expander)
3969 		return;
3970 
3971 	list_for_each_entry(mpt3sas_port,
3972 	   &sas_expander->sas_port_list, port_list) {
3973 		if (mpt3sas_port->remote_identify.device_type ==
3974 		    SAS_END_DEVICE) {
3975 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
3976 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3977 			    mpt3sas_port->remote_identify.sas_address,
3978 			    mpt3sas_port->hba_port);
3979 			if (sas_device) {
3980 				set_bit(sas_device->handle,
3981 						ioc->blocking_handles);
3982 				sas_device_put(sas_device);
3983 			}
3984 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3985 		}
3986 	}
3987 
3988 	list_for_each_entry(mpt3sas_port,
3989 	   &sas_expander->sas_port_list, port_list) {
3990 
3991 		if (mpt3sas_port->remote_identify.device_type ==
3992 		    SAS_EDGE_EXPANDER_DEVICE ||
3993 		    mpt3sas_port->remote_identify.device_type ==
3994 		    SAS_FANOUT_EXPANDER_DEVICE) {
3995 			expander_sibling =
3996 			    mpt3sas_scsih_expander_find_by_sas_address(
3997 			    ioc, mpt3sas_port->remote_identify.sas_address,
3998 			    mpt3sas_port->hba_port);
3999 			_scsih_block_io_to_children_attached_to_ex(ioc,
4000 			    expander_sibling);
4001 		}
4002 	}
4003 }
4004 
4005 /**
4006  * _scsih_block_io_to_children_attached_directly
4007  * @ioc: per adapter object
4008  * @event_data: topology change event data
4009  *
4010  * This routine set sdev state to SDEV_BLOCK for all devices
4011  * direct attached during device pull.
4012  */
4013 static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)4014 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4015 	Mpi2EventDataSasTopologyChangeList_t *event_data)
4016 {
4017 	int i;
4018 	u16 handle;
4019 	u16 reason_code;
4020 
4021 	for (i = 0; i < event_data->NumEntries; i++) {
4022 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4023 		if (!handle)
4024 			continue;
4025 		reason_code = event_data->PHY[i].PhyStatus &
4026 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4027 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4028 			_scsih_block_io_device(ioc, handle);
4029 	}
4030 }
4031 
4032 /**
4033  * _scsih_block_io_to_pcie_children_attached_directly
4034  * @ioc: per adapter object
4035  * @event_data: topology change event data
4036  *
4037  * This routine set sdev state to SDEV_BLOCK for all devices
4038  * direct attached during device pull/reconnect.
4039  */
4040 static void
_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)4041 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4042 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4043 {
4044 	int i;
4045 	u16 handle;
4046 	u16 reason_code;
4047 
4048 	for (i = 0; i < event_data->NumEntries; i++) {
4049 		handle =
4050 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4051 		if (!handle)
4052 			continue;
4053 		reason_code = event_data->PortEntry[i].PortStatus;
4054 		if (reason_code ==
4055 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4056 			_scsih_block_io_device(ioc, handle);
4057 	}
4058 }
4059 /**
4060  * _scsih_tm_tr_send - send task management request
4061  * @ioc: per adapter object
4062  * @handle: device handle
4063  * Context: interrupt time.
4064  *
4065  * This code is to initiate the device removal handshake protocol
4066  * with controller firmware.  This function will issue target reset
4067  * using high priority request queue.  It will send a sas iounit
4068  * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
4069  *
4070  * This is designed to send muliple task management request at the same
4071  * time to the fifo. If the fifo is full, we will append the request,
4072  * and process it in a future completion.
4073  */
4074 static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER * ioc,u16 handle)4075 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4076 {
4077 	Mpi2SCSITaskManagementRequest_t *mpi_request;
4078 	u16 smid;
4079 	struct _sas_device *sas_device = NULL;
4080 	struct _pcie_device *pcie_device = NULL;
4081 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4082 	u64 sas_address = 0;
4083 	unsigned long flags;
4084 	struct _tr_list *delayed_tr;
4085 	u32 ioc_state;
4086 	u8 tr_method = 0;
4087 	struct hba_port *port = NULL;
4088 
4089 	if (ioc->pci_error_recovery) {
4090 		dewtprintk(ioc,
4091 			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4092 				    __func__, handle));
4093 		return;
4094 	}
4095 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4096 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4097 		dewtprintk(ioc,
4098 			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4099 				    __func__, handle));
4100 		return;
4101 	}
4102 
4103 	/* if PD, then return */
4104 	if (test_bit(handle, ioc->pd_handles))
4105 		return;
4106 
4107 	clear_bit(handle, ioc->pend_os_device_add);
4108 
4109 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
4110 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4111 	if (sas_device && sas_device->starget &&
4112 	    sas_device->starget->hostdata) {
4113 		sas_target_priv_data = sas_device->starget->hostdata;
4114 		sas_target_priv_data->deleted = 1;
4115 		sas_address = sas_device->sas_address;
4116 		port = sas_device->port;
4117 	}
4118 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4119 	if (!sas_device) {
4120 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4121 		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4122 		if (pcie_device && pcie_device->starget &&
4123 			pcie_device->starget->hostdata) {
4124 			sas_target_priv_data = pcie_device->starget->hostdata;
4125 			sas_target_priv_data->deleted = 1;
4126 			sas_address = pcie_device->wwid;
4127 		}
4128 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
4129 		if (pcie_device && (!ioc->tm_custom_handling) &&
4130 		    (!(mpt3sas_scsih_is_pcie_scsi_device(
4131 		    pcie_device->device_info))))
4132 			tr_method =
4133 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4134 		else
4135 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4136 	}
4137 	if (sas_target_priv_data) {
4138 		dewtprintk(ioc,
4139 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4140 				    handle, (u64)sas_address));
4141 		if (sas_device) {
4142 			if (sas_device->enclosure_handle != 0)
4143 				dewtprintk(ioc,
4144 					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4145 						    (u64)sas_device->enclosure_logical_id,
4146 						    sas_device->slot));
4147 			if (sas_device->connector_name[0] != '\0')
4148 				dewtprintk(ioc,
4149 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4150 						    sas_device->enclosure_level,
4151 						    sas_device->connector_name));
4152 		} else if (pcie_device) {
4153 			if (pcie_device->enclosure_handle != 0)
4154 				dewtprintk(ioc,
4155 					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4156 						    (u64)pcie_device->enclosure_logical_id,
4157 						    pcie_device->slot));
4158 			if (pcie_device->connector_name[0] != '\0')
4159 				dewtprintk(ioc,
4160 					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4161 						    pcie_device->enclosure_level,
4162 						    pcie_device->connector_name));
4163 		}
4164 		_scsih_ublock_io_device(ioc, sas_address, port);
4165 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4166 	}
4167 
4168 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4169 	if (!smid) {
4170 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4171 		if (!delayed_tr)
4172 			goto out;
4173 		INIT_LIST_HEAD(&delayed_tr->list);
4174 		delayed_tr->handle = handle;
4175 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4176 		dewtprintk(ioc,
4177 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4178 				    handle));
4179 		goto out;
4180 	}
4181 
4182 	dewtprintk(ioc,
4183 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4184 			    handle, smid, ioc->tm_tr_cb_idx));
4185 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4186 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4187 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4188 	mpi_request->DevHandle = cpu_to_le16(handle);
4189 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4190 	mpi_request->MsgFlags = tr_method;
4191 	set_bit(handle, ioc->device_remove_in_progress);
4192 	ioc->put_smid_hi_priority(ioc, smid, 0);
4193 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
4194 
4195 out:
4196 	if (sas_device)
4197 		sas_device_put(sas_device);
4198 	if (pcie_device)
4199 		pcie_device_put(pcie_device);
4200 }
4201 
4202 /**
4203  * _scsih_tm_tr_complete -
4204  * @ioc: per adapter object
4205  * @smid: system request message index
4206  * @msix_index: MSIX table index supplied by the OS
4207  * @reply: reply message frame(lower 32bit addr)
4208  * Context: interrupt time.
4209  *
4210  * This is the target reset completion routine.
4211  * This code is part of the code to initiate the device removal
4212  * handshake protocol with controller firmware.
4213  * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4214  *
4215  * Return: 1 meaning mf should be freed from _base_interrupt
4216  *         0 means the mf is freed from this function.
4217  */
4218 static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)4219 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4220 	u32 reply)
4221 {
4222 	u16 handle;
4223 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4224 	Mpi2SCSITaskManagementReply_t *mpi_reply =
4225 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4226 	Mpi2SasIoUnitControlRequest_t *mpi_request;
4227 	u16 smid_sas_ctrl;
4228 	u32 ioc_state;
4229 	struct _sc_list *delayed_sc;
4230 
4231 	if (ioc->pci_error_recovery) {
4232 		dewtprintk(ioc,
4233 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4234 				    __func__));
4235 		return 1;
4236 	}
4237 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4238 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4239 		dewtprintk(ioc,
4240 			   ioc_info(ioc, "%s: host is not operational\n",
4241 				    __func__));
4242 		return 1;
4243 	}
4244 	if (unlikely(!mpi_reply)) {
4245 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4246 			__FILE__, __LINE__, __func__);
4247 		return 1;
4248 	}
4249 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4250 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
4251 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4252 		dewtprintk(ioc,
4253 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4254 				   handle,
4255 				   le16_to_cpu(mpi_reply->DevHandle), smid));
4256 		return 0;
4257 	}
4258 
4259 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4260 	dewtprintk(ioc,
4261 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4262 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4263 			    le32_to_cpu(mpi_reply->IOCLogInfo),
4264 			    le32_to_cpu(mpi_reply->TerminationCount)));
4265 
4266 	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4267 	if (!smid_sas_ctrl) {
4268 		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4269 		if (!delayed_sc)
4270 			return _scsih_check_for_pending_tm(ioc, smid);
4271 		INIT_LIST_HEAD(&delayed_sc->list);
4272 		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4273 		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4274 		dewtprintk(ioc,
4275 			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4276 				    handle));
4277 		return _scsih_check_for_pending_tm(ioc, smid);
4278 	}
4279 
4280 	dewtprintk(ioc,
4281 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4282 			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4283 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4284 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4285 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4286 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4287 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
4288 	ioc->put_smid_default(ioc, smid_sas_ctrl);
4289 
4290 	return _scsih_check_for_pending_tm(ioc, smid);
4291 }
4292 
4293 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4294  *				 issue to IOC or not.
4295  * @ioc: per adapter object
4296  * @scmd: pointer to scsi command object
4297  *
4298  * Returns true if scmd can be issued to IOC otherwise returns false.
4299  */
_scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd)4300 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4301 	struct scsi_cmnd *scmd)
4302 {
4303 
4304 	if (ioc->pci_error_recovery)
4305 		return false;
4306 
4307 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4308 		if (ioc->remove_host)
4309 			return false;
4310 
4311 		return true;
4312 	}
4313 
4314 	if (ioc->remove_host) {
4315 
4316 		switch (scmd->cmnd[0]) {
4317 		case SYNCHRONIZE_CACHE:
4318 		case START_STOP:
4319 			return true;
4320 		default:
4321 			return false;
4322 		}
4323 	}
4324 
4325 	return true;
4326 }
4327 
4328 /**
4329  * _scsih_sas_control_complete - completion routine
4330  * @ioc: per adapter object
4331  * @smid: system request message index
4332  * @msix_index: MSIX table index supplied by the OS
4333  * @reply: reply message frame(lower 32bit addr)
4334  * Context: interrupt time.
4335  *
4336  * This is the sas iounit control completion routine.
4337  * This code is part of the code to initiate the device removal
4338  * handshake protocol with controller firmware.
4339  *
4340  * Return: 1 meaning mf should be freed from _base_interrupt
4341  *         0 means the mf is freed from this function.
4342  */
4343 static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)4344 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4345 	u8 msix_index, u32 reply)
4346 {
4347 	Mpi2SasIoUnitControlReply_t *mpi_reply =
4348 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4349 
4350 	if (likely(mpi_reply)) {
4351 		dewtprintk(ioc,
4352 			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4353 				    le16_to_cpu(mpi_reply->DevHandle), smid,
4354 				    le16_to_cpu(mpi_reply->IOCStatus),
4355 				    le32_to_cpu(mpi_reply->IOCLogInfo)));
4356 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
4357 		     MPI2_IOCSTATUS_SUCCESS) {
4358 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4359 			    ioc->device_remove_in_progress);
4360 		}
4361 	} else {
4362 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4363 			__FILE__, __LINE__, __func__);
4364 	}
4365 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4366 }
4367 
4368 /**
4369  * _scsih_tm_tr_volume_send - send target reset request for volumes
4370  * @ioc: per adapter object
4371  * @handle: device handle
4372  * Context: interrupt time.
4373  *
4374  * This is designed to send muliple task management request at the same
4375  * time to the fifo. If the fifo is full, we will append the request,
4376  * and process it in a future completion.
4377  */
4378 static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER * ioc,u16 handle)4379 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4380 {
4381 	Mpi2SCSITaskManagementRequest_t *mpi_request;
4382 	u16 smid;
4383 	struct _tr_list *delayed_tr;
4384 
4385 	if (ioc->pci_error_recovery) {
4386 		dewtprintk(ioc,
4387 			   ioc_info(ioc, "%s: host reset in progress!\n",
4388 				    __func__));
4389 		return;
4390 	}
4391 
4392 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4393 	if (!smid) {
4394 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4395 		if (!delayed_tr)
4396 			return;
4397 		INIT_LIST_HEAD(&delayed_tr->list);
4398 		delayed_tr->handle = handle;
4399 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4400 		dewtprintk(ioc,
4401 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4402 				    handle));
4403 		return;
4404 	}
4405 
4406 	dewtprintk(ioc,
4407 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4408 			    handle, smid, ioc->tm_tr_volume_cb_idx));
4409 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4410 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4411 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4412 	mpi_request->DevHandle = cpu_to_le16(handle);
4413 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4414 	ioc->put_smid_hi_priority(ioc, smid, 0);
4415 }
4416 
4417 /**
4418  * _scsih_tm_volume_tr_complete - target reset completion
4419  * @ioc: per adapter object
4420  * @smid: system request message index
4421  * @msix_index: MSIX table index supplied by the OS
4422  * @reply: reply message frame(lower 32bit addr)
4423  * Context: interrupt time.
4424  *
4425  * Return: 1 meaning mf should be freed from _base_interrupt
4426  *         0 means the mf is freed from this function.
4427  */
4428 static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)4429 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4430 	u8 msix_index, u32 reply)
4431 {
4432 	u16 handle;
4433 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4434 	Mpi2SCSITaskManagementReply_t *mpi_reply =
4435 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4436 
4437 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
4438 		dewtprintk(ioc,
4439 			   ioc_info(ioc, "%s: host reset in progress!\n",
4440 				    __func__));
4441 		return 1;
4442 	}
4443 	if (unlikely(!mpi_reply)) {
4444 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4445 			__FILE__, __LINE__, __func__);
4446 		return 1;
4447 	}
4448 
4449 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4450 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
4451 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4452 		dewtprintk(ioc,
4453 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4454 				   handle, le16_to_cpu(mpi_reply->DevHandle),
4455 				   smid));
4456 		return 0;
4457 	}
4458 
4459 	dewtprintk(ioc,
4460 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4461 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4462 			    le32_to_cpu(mpi_reply->IOCLogInfo),
4463 			    le32_to_cpu(mpi_reply->TerminationCount)));
4464 
4465 	return _scsih_check_for_pending_tm(ioc, smid);
4466 }
4467 
4468 /**
4469  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4470  * @ioc: per adapter object
4471  * @smid: system request message index
4472  * @event: Event ID
4473  * @event_context: used to track events uniquely
4474  *
4475  * Context - processed in interrupt context.
4476  */
4477 static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER * ioc,u16 smid,U16 event,U32 event_context)4478 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4479 				U32 event_context)
4480 {
4481 	Mpi2EventAckRequest_t *ack_request;
4482 	int i = smid - ioc->internal_smid;
4483 	unsigned long flags;
4484 
4485 	/* Without releasing the smid just update the
4486 	 * call back index and reuse the same smid for
4487 	 * processing this delayed request
4488 	 */
4489 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4490 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4491 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4492 
4493 	dewtprintk(ioc,
4494 		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4495 			    le16_to_cpu(event), smid, ioc->base_cb_idx));
4496 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4497 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4498 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4499 	ack_request->Event = event;
4500 	ack_request->EventContext = event_context;
4501 	ack_request->VF_ID = 0;  /* TODO */
4502 	ack_request->VP_ID = 0;
4503 	ioc->put_smid_default(ioc, smid);
4504 }
4505 
4506 /**
4507  * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4508  *				sas_io_unit_ctrl messages
4509  * @ioc: per adapter object
4510  * @smid: system request message index
4511  * @handle: device handle
4512  *
4513  * Context - processed in interrupt context.
4514  */
4515 static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER * ioc,u16 smid,u16 handle)4516 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4517 					u16 smid, u16 handle)
4518 {
4519 	Mpi2SasIoUnitControlRequest_t *mpi_request;
4520 	u32 ioc_state;
4521 	int i = smid - ioc->internal_smid;
4522 	unsigned long flags;
4523 
4524 	if (ioc->remove_host) {
4525 		dewtprintk(ioc,
4526 			   ioc_info(ioc, "%s: host has been removed\n",
4527 				    __func__));
4528 		return;
4529 	} else if (ioc->pci_error_recovery) {
4530 		dewtprintk(ioc,
4531 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4532 				    __func__));
4533 		return;
4534 	}
4535 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4536 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4537 		dewtprintk(ioc,
4538 			   ioc_info(ioc, "%s: host is not operational\n",
4539 				    __func__));
4540 		return;
4541 	}
4542 
4543 	/* Without releasing the smid just update the
4544 	 * call back index and reuse the same smid for
4545 	 * processing this delayed request
4546 	 */
4547 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4548 	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4549 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4550 
4551 	dewtprintk(ioc,
4552 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4553 			    handle, smid, ioc->tm_sas_control_cb_idx));
4554 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4555 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4556 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4557 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4558 	mpi_request->DevHandle = cpu_to_le16(handle);
4559 	ioc->put_smid_default(ioc, smid);
4560 }
4561 
4562 /**
4563  * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4564  * @ioc: per adapter object
4565  * @smid: system request message index
4566  *
4567  * Context: Executed in interrupt context
4568  *
4569  * This will check delayed internal messages list, and process the
4570  * next request.
4571  *
4572  * Return: 1 meaning mf should be freed from _base_interrupt
4573  *         0 means the mf is freed from this function.
4574  */
4575 u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER * ioc,u16 smid)4576 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4577 {
4578 	struct _sc_list *delayed_sc;
4579 	struct _event_ack_list *delayed_event_ack;
4580 
4581 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4582 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4583 						struct _event_ack_list, list);
4584 		_scsih_issue_delayed_event_ack(ioc, smid,
4585 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4586 		list_del(&delayed_event_ack->list);
4587 		kfree(delayed_event_ack);
4588 		return 0;
4589 	}
4590 
4591 	if (!list_empty(&ioc->delayed_sc_list)) {
4592 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4593 						struct _sc_list, list);
4594 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4595 						 delayed_sc->handle);
4596 		list_del(&delayed_sc->list);
4597 		kfree(delayed_sc);
4598 		return 0;
4599 	}
4600 	return 1;
4601 }
4602 
4603 /**
4604  * _scsih_check_for_pending_tm - check for pending task management
4605  * @ioc: per adapter object
4606  * @smid: system request message index
4607  *
4608  * This will check delayed target reset list, and feed the
4609  * next reqeust.
4610  *
4611  * Return: 1 meaning mf should be freed from _base_interrupt
4612  *         0 means the mf is freed from this function.
4613  */
4614 static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER * ioc,u16 smid)4615 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4616 {
4617 	struct _tr_list *delayed_tr;
4618 
4619 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4620 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4621 		    struct _tr_list, list);
4622 		mpt3sas_base_free_smid(ioc, smid);
4623 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4624 		list_del(&delayed_tr->list);
4625 		kfree(delayed_tr);
4626 		return 0;
4627 	}
4628 
4629 	if (!list_empty(&ioc->delayed_tr_list)) {
4630 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4631 		    struct _tr_list, list);
4632 		mpt3sas_base_free_smid(ioc, smid);
4633 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4634 		list_del(&delayed_tr->list);
4635 		kfree(delayed_tr);
4636 		return 0;
4637 	}
4638 
4639 	return 1;
4640 }
4641 
/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot.  When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* Pass 1: send a target reset for every attached device the
	 * event reports as no longer responding.
	 */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* A handle below the HBA phy count means the devices hang
	 * directly off the host adapter rather than off an expander.
	 */
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* Drain the blocking_handles bitmap: block each flagged
		 * device and clear its bit until none remain set.
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	/* Only a fully non-responding expander invalidates queued events. */
	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
				   fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			/* A queued add/responding event for the expander
			 * that just vanished must not be processed.
			 */
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4722 
4723 /**
4724  * _scsih_check_pcie_topo_remove_events - sanity check on topo
4725  * events
4726  * @ioc: per adapter object
4727  * @event_data: the event data payload
4728  *
4729  * This handles the case where driver receives multiple switch
4730  * or device add and delete events in a single shot.  When there
4731  * is a delete event the routine will void any pending add
4732  * events waiting in the event queue.
4733  */
4734 static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)4735 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4736 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4737 {
4738 	struct fw_event_work *fw_event;
4739 	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4740 	unsigned long flags;
4741 	int i, reason_code;
4742 	u16 handle, switch_handle;
4743 
4744 	for (i = 0; i < event_data->NumEntries; i++) {
4745 		handle =
4746 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4747 		if (!handle)
4748 			continue;
4749 		reason_code = event_data->PortEntry[i].PortStatus;
4750 		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4751 			_scsih_tm_tr_send(ioc, handle);
4752 	}
4753 
4754 	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4755 	if (!switch_handle) {
4756 		_scsih_block_io_to_pcie_children_attached_directly(
4757 							ioc, event_data);
4758 		return;
4759 	}
4760     /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4761 	if ((event_data->SwitchStatus
4762 		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4763 		(event_data->SwitchStatus ==
4764 					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4765 		_scsih_block_io_to_pcie_children_attached_directly(
4766 							ioc, event_data);
4767 
4768 	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4769 		return;
4770 
4771 	/* mark ignore flag for pending events */
4772 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4773 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4774 		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4775 			fw_event->ignore)
4776 			continue;
4777 		local_event_data =
4778 			(Mpi26EventDataPCIeTopologyChangeList_t *)
4779 			fw_event->event_data;
4780 		if (local_event_data->SwitchStatus ==
4781 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4782 		    local_event_data->SwitchStatus ==
4783 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4784 			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4785 				switch_handle) {
4786 				dewtprintk(ioc,
4787 					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
4788 				fw_event->ignore = 1;
4789 			}
4790 		}
4791 	}
4792 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4793 }
4794 
4795 /**
4796  * _scsih_set_volume_delete_flag - setting volume delete flag
4797  * @ioc: per adapter object
4798  * @handle: device handle
4799  *
4800  * This returns nothing.
4801  */
4802 static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)4803 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4804 {
4805 	struct _raid_device *raid_device;
4806 	struct MPT3SAS_TARGET *sas_target_priv_data;
4807 	unsigned long flags;
4808 
4809 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4810 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4811 	if (raid_device && raid_device->starget &&
4812 	    raid_device->starget->hostdata) {
4813 		sas_target_priv_data =
4814 		    raid_device->starget->hostdata;
4815 		sas_target_priv_data->deleted = 1;
4816 		dewtprintk(ioc,
4817 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4818 				    handle, (u64)raid_device->wwid));
4819 	}
4820 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4821 }
4822 
4823 /**
4824  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4825  * @handle: input handle
4826  * @a: handle for volume a
4827  * @b: handle for volume b
4828  *
4829  * IR firmware only supports two raid volumes.  The purpose of this
4830  * routine is to set the volume handle in either a or b. When the given
4831  * input handle is non-zero, or when a and b have not been set before.
4832  */
4833 static void
_scsih_set_volume_handle_for_tr(u16 handle,u16 * a,u16 * b)4834 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4835 {
4836 	if (!handle || handle == *a || handle == *b)
4837 		return;
4838 	if (!*a)
4839 		*a = handle;
4840 	else if (!*b)
4841 		*b = handle;
4842 }
4843 
/**
 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This routine will send target reset to volume, followed by target
 * resets to the PDs. This is called when a PD has been removed, or
 * volume has been deleted or removed. When the target reset is sent
 * to volume, the PD target resets need to be queued to start upon
 * completion of the volume target reset.
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	/* a/b hold the (at most two) volume handles targeted for reset. */
	a = 0;
	b = 0;

	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		/* foreign configs are not ours to act on */
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* issue the volume target resets collected above */
	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		/* the PD is no longer hidden behind the volume */
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			/* Parent volume reset is in flight: queue the PD
			 * reset to run after the volume reset completes.
			 */
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
					    handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}
4927 
4928 
4929 /**
4930  * _scsih_check_volume_delete_events - set delete flag for volumes
4931  * @ioc: per adapter object
4932  * @event_data: the event data payload
4933  * Context: interrupt time.
4934  *
4935  * This will handle the case when the cable connected to entire volume is
4936  * pulled. We will take care of setting the deleted flag so normal IO will
4937  * not be sent.
4938  */
4939 static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrVolume_t * event_data)4940 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4941 	Mpi2EventDataIrVolume_t *event_data)
4942 {
4943 	u32 state;
4944 
4945 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4946 		return;
4947 	state = le32_to_cpu(event_data->NewValue);
4948 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4949 	    MPI2_RAID_VOL_STATE_FAILED)
4950 		_scsih_set_volume_delete_flag(ioc,
4951 		    le16_to_cpu(event_data->VolDevHandle));
4952 }
4953 
4954 /**
4955  * _scsih_temp_threshold_events - display temperature threshold exceeded events
4956  * @ioc: per adapter object
4957  * @event_data: the temp threshold event data
4958  * Context: interrupt time.
4959  */
4960 static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataTemperature_t * event_data)4961 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4962 	Mpi2EventDataTemperature_t *event_data)
4963 {
4964 	u32 doorbell;
4965 	if (ioc->temp_sensors_count >= event_data->SensorNum) {
4966 		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4967 			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4968 			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4969 			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4970 			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4971 			event_data->SensorNum);
4972 		ioc_err(ioc, "Current Temp In Celsius: %d\n",
4973 			event_data->CurrentTemperature);
4974 		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4975 			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4976 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
4977 			    MPI2_IOC_STATE_FAULT) {
4978 				mpt3sas_print_fault_code(ioc,
4979 				    doorbell & MPI2_DOORBELL_DATA_MASK);
4980 			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4981 			    MPI2_IOC_STATE_COREDUMP) {
4982 				mpt3sas_print_coredump_info(ioc,
4983 				    doorbell & MPI2_DOORBELL_DATA_MASK);
4984 			}
4985 		}
4986 	}
4987 }
4988 
/* Track an in-flight ATA passthrough command per device; returns nonzero
 * when "pending" is requested but another ATA command is already in flight.
 */
static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *dev_priv = scmd->device->hostdata;

	/* Only ATA_12/ATA_16 passthrough commands are tracked. */
	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
		return 0;

	if (!pending) {
		clear_bit(0, &dev_priv->ata_command_pending);
		return 0;
	}

	return test_and_set_bit(0, &dev_priv->ata_command_pending);
}
5002 
5003 /**
5004  * _scsih_flush_running_cmds - completing outstanding commands.
5005  * @ioc: per adapter object
5006  *
5007  * The flushing out of all pending scmd commands following host reset,
5008  * where all IO is dropped to the floor.
5009  */
5010 static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER * ioc)5011 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
5012 {
5013 	struct scsi_cmnd *scmd;
5014 	struct scsiio_tracker *st;
5015 	u16 smid;
5016 	int count = 0;
5017 
5018 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
5019 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5020 		if (!scmd)
5021 			continue;
5022 		count++;
5023 		_scsih_set_satl_pending(scmd, false);
5024 		st = scsi_cmd_priv(scmd);
5025 		mpt3sas_base_clear_st(ioc, st);
5026 		scsi_dma_unmap(scmd);
5027 		if (ioc->pci_error_recovery || ioc->remove_host)
5028 			scmd->result = DID_NO_CONNECT << 16;
5029 		else
5030 			scmd->result = DID_RESET << 16;
5031 		scsi_done(scmd);
5032 	}
5033 	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
5034 }
5035 
5036 /**
5037  * _scsih_setup_eedp - setup MPI request for EEDP transfer
5038  * @ioc: per adapter object
5039  * @scmd: pointer to scsi command object
5040  * @mpi_request: pointer to the SCSI_IO request message frame
5041  *
5042  * Supporting protection 1 and 3.
5043  */
5044 static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd,Mpi25SCSIIORequest_t * mpi_request)5045 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5046 	Mpi25SCSIIORequest_t *mpi_request)
5047 {
5048 	u16 eedp_flags;
5049 	Mpi25SCSIIORequest_t *mpi_request_3v =
5050 	   (Mpi25SCSIIORequest_t *)mpi_request;
5051 
5052 	switch (scsi_get_prot_op(scmd)) {
5053 	case SCSI_PROT_READ_STRIP:
5054 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5055 		break;
5056 	case SCSI_PROT_WRITE_INSERT:
5057 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5058 		break;
5059 	default:
5060 		return;
5061 	}
5062 
5063 	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
5064 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5065 
5066 	if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
5067 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
5068 
5069 	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
5070 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
5071 
5072 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5073 			cpu_to_be32(scsi_prot_ref_tag(scmd));
5074 	}
5075 
5076 	mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
5077 
5078 	if (ioc->is_gen35_ioc)
5079 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5080 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5081 }
5082 
5083 /**
5084  * _scsih_eedp_error_handling - return sense code for EEDP errors
5085  * @scmd: pointer to scsi command object
5086  * @ioc_status: ioc status
5087  */
5088 static void
_scsih_eedp_error_handling(struct scsi_cmnd * scmd,u16 ioc_status)5089 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5090 {
5091 	u8 ascq;
5092 
5093 	switch (ioc_status) {
5094 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5095 		ascq = 0x01;
5096 		break;
5097 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5098 		ascq = 0x02;
5099 		break;
5100 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5101 		ascq = 0x03;
5102 		break;
5103 	default:
5104 		ascq = 0x00;
5105 		break;
5106 	}
5107 	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
5108 	set_host_byte(scmd, DID_ABORT);
5109 }
5110 
/**
 * scsih_qcmd - main scsi request entry point
 * @shost: SCSI host pointer
 * @scmd: pointer to scsi command object
 *
 * The callback index is set inside `ioc->scsi_io_cb_idx`.
 *
 * Return: 0 on success.  If there's a failure, return either:
 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
 */
static int
scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _raid_device *raid_device;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int class;
	Mpi25SCSIIORequest_t *mpi_request;
	struct _pcie_device *pcie_device = NULL;
	u32 mpi_control;
	u16 smid;
	u16 handle;

	if (ioc->logging_level & MPT_DEBUG_SCSI)
		scsi_print_command(scmd);

	/* No per-device private data means no attached target: fail fast. */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}

	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}

	sas_target_priv_data = sas_device_priv_data->sas_target;

	/* invalid device handle */
	handle = sas_target_priv_data->handle;

	/*
	 * Avoid error handling escallation when device is disconnected
	 */
	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
		/* Answer TEST UNIT READY during recovery with a synthetic
		 * UNIT ATTENTION instead of escalating error handling.
		 */
		if (scmd->device->host->shost_state == SHOST_RECOVERY &&
		    scmd->cmnd[0] == TEST_UNIT_READY) {
			scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
			scsi_done(scmd);
			return 0;
		}
	}

	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	}


	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
		/* host recovery or link resets sent via IOCTLs */
		return SCSI_MLQUEUE_HOST_BUSY;
	} else if (sas_target_priv_data->deleted) {
		/* device has been deleted */
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		return 0;
	} else if (sas_target_priv_data->tm_busy ||
		   sas_device_priv_data->block) {
		/* device busy with task management */
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/*
	 * Bug work around for firmware SATL handling.  The loop
	 * is based on atomic operations and ensures consistency
	 * since we're lockless at this point
	 */
	do {
		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
			return SCSI_MLQUEUE_DEVICE_BUSY;
	} while (_scsih_set_satl_pending(scmd, true));

	/* translate the data direction into the MPI control field */
	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;

	/* set tags */
	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	/* NCQ Prio supported, make sure control indicated high priority */
	if (sas_device_priv_data->ncq_prio_enable) {
		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (class == IOPRIO_CLASS_RT)
			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
	}
	/* Make sure Device is not raid volume.
	 * We do not expose raid functionality to upper layer for warpdrive.
	 */
	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;

	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		/* release the SATL workaround claim before bailing out */
		_scsih_set_satl_pending(scmd, false);
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	_scsih_setup_eedp(ioc, scmd, mpi_request);

	if (scmd->cmd_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/* NOTE(review): this assignment is redundant — the if/else just
	 * below always overwrites Function.
	 */
	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT)
		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
	else
		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
	mpi_request->Control = cpu_to_le32(mpi_control);
	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
	mpi_request->SenseBufferLowAddress =
	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
	    mpi_request->LUN);
	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);

	/* build the scatter-gather list, or a zero-length SGE */
	if (mpi_request->DataLength) {
		pcie_device = sas_target_priv_data->pcie_dev;
		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
			mpt3sas_base_free_smid(ioc, smid);
			_scsih_set_satl_pending(scmd, false);
			goto out;
		}
	} else
		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);

	raid_device = sas_target_priv_data->raid_device;
	if (raid_device && raid_device->direct_io_enabled)
		mpt3sas_setup_direct_io(ioc, scmd,
			raid_device, mpi_request);

	/* post the request, using the fast path when the target allows it */
	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
			ioc->put_smid_fast_path(ioc, smid, handle);
		} else
			ioc->put_smid_scsi_io(ioc, smid,
			    le16_to_cpu(mpi_request->DevHandle));
	} else
		ioc->put_smid_default(ioc, smid);
	return 0;

 out:
	return SCSI_MLQUEUE_HOST_BUSY;
}
5285 
5286 /**
5287  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5288  * @sense_buffer: sense data returned by target
5289  * @data: normalized skey/asc/ascq
5290  */
5291 static void
_scsih_normalize_sense(char * sense_buffer,struct sense_info * data)5292 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5293 {
5294 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
5295 		/* descriptor format */
5296 		data->skey = sense_buffer[1] & 0x0F;
5297 		data->asc = sense_buffer[2];
5298 		data->ascq = sense_buffer[3];
5299 	} else {
5300 		/* fixed format */
5301 		data->skey = sense_buffer[2] & 0x0F;
5302 		data->asc = sense_buffer[12];
5303 		data->ascq = sense_buffer[13];
5304 	}
5305 }
5306 
5307 /**
5308  * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
5309  * @ioc: per adapter object
5310  * @scmd: pointer to scsi command object
5311  * @mpi_reply: reply mf payload returned from firmware
5312  * @smid: ?
5313  *
5314  * scsi_status - SCSI Status code returned from target device
5315  * scsi_state - state info associated with SCSI_IO determined by ioc
5316  * ioc_status - ioc supplied status info
5317  */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* per-adapter scratch buffer; scsi_state flag names strcat'ed below */
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* label volumes per adapter flavor (WarpDrive hides IR messages) */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* suppress logging for this specific firmware loginfo code */
	if (log_info == 0x31170000)
		return;

	/* translate ioc_status into a printable string */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* translate scsi_status into a printable string */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* build a space-separated list of the scsi_state flag names */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/* identify the target: RAID volume, PCIe (NVMe) device, or SAS device */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			/* drop the reference taken by the lookup above */
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			/* drop the reference taken by the lookup above */
			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	/* dump decoded sense data when the firmware flagged it valid */
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	/* decode the low byte of the response info when valid */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}
5518 
5519 /**
5520  * _scsih_turn_on_pfa_led - illuminate PFA LED
5521  * @ioc: per adapter object
5522  * @handle: device handle
5523  * Context: process
5524  */
5525 static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5526 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5527 {
5528 	Mpi2SepReply_t mpi_reply;
5529 	Mpi2SepRequest_t mpi_request;
5530 	struct _sas_device *sas_device;
5531 
5532 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5533 	if (!sas_device)
5534 		return;
5535 
5536 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5537 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5538 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5539 	mpi_request.SlotStatus =
5540 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5541 	mpi_request.DevHandle = cpu_to_le16(handle);
5542 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5543 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5544 	    &mpi_request)) != 0) {
5545 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5546 			__FILE__, __LINE__, __func__);
5547 		goto out;
5548 	}
5549 	sas_device->pfa_led_on = 1;
5550 
5551 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5552 		dewtprintk(ioc,
5553 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5554 				    le16_to_cpu(mpi_reply.IOCStatus),
5555 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5556 		goto out;
5557 	}
5558 out:
5559 	sas_device_put(sas_device);
5560 }
5561 
5562 /**
5563  * _scsih_turn_off_pfa_led - turn off Fault LED
5564  * @ioc: per adapter object
5565  * @sas_device: sas device whose PFA LED has to turned off
5566  * Context: process
5567  */
5568 static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)5569 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5570 	struct _sas_device *sas_device)
5571 {
5572 	Mpi2SepReply_t mpi_reply;
5573 	Mpi2SepRequest_t mpi_request;
5574 
5575 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5576 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5577 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5578 	mpi_request.SlotStatus = 0;
5579 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5580 	mpi_request.DevHandle = 0;
5581 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5582 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5583 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5584 		&mpi_request)) != 0) {
5585 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5586 			__FILE__, __LINE__, __func__);
5587 		return;
5588 	}
5589 
5590 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5591 		dewtprintk(ioc,
5592 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5593 				    le16_to_cpu(mpi_reply.IOCStatus),
5594 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5595 		return;
5596 	}
5597 }
5598 
5599 /**
5600  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5601  * @ioc: per adapter object
5602  * @handle: device handle
5603  * Context: interrupt.
5604  */
5605 static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5606 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5607 {
5608 	struct fw_event_work *fw_event;
5609 
5610 	fw_event = alloc_fw_event_work(0);
5611 	if (!fw_event)
5612 		return;
5613 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5614 	fw_event->device_handle = handle;
5615 	fw_event->ioc = ioc;
5616 	_scsih_fw_event_add(ioc, fw_event);
5617 	fw_event_work_put(fw_event);
5618 }
5619 
/**
 * _scsih_smart_predicted_fault - process smart errors
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 *
 * Handles a SMART predicted-fault report for a plain (non-RAID) SAS
 * device: logs enclosure/chassis info, fires the PFA LED event on IBM
 * branded adapters, and inserts a SAS_DEVICE_STATUS_CHANGE entry into
 * the driver's event log.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	/* skip RAID components and volumes */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* GFP_ATOMIC: running in interrupt context */
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	/* ASC 0x5D: failure prediction threshold exceeded */
	event_data->ASC = 0x5D;
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
out:
	/* drop the reference taken by the handle lookup */
	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}
5690 
/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when using _scsih_qcmd.
 *
 * Translates the firmware's IOC/SCSI status into a midlayer result code,
 * copies sense data when valid, and completes the command.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* no reply frame means a good completion */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	     ((ioc_status & MPI2_IOCSTATUS_MASK)
	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		/* resubmit the same smid to the volume handle */
		st->direct_io = 0;
		st->scmd = scmd;
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* snoop only the first completion per device */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/*
	 * An underrun with nothing transferred but a busy-class SCSI status
	 * is treated as success so that the SCSI status alone determines the
	 * result in the switch below.
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		/* never copy more than the midlayer's sense buffer holds */
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	/* map the (masked) ioc_status to a midlayer result */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		if (log_info == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
			/*
			 * This is a ATA NCQ command aborted due to another NCQ
			 * command failure. We must retry this command
			 * immediately but without incrementing its retry
			 * counter.
			 */
			WARN_ON_ONCE(xfer_cnt != 0);
			scmd->result = DID_IMM_RETRY << 16;
			break;
		}
		/* loginfo 0x31110630: retry a few times, then offline */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				    SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		/* valid sense data already explains the underrun */
		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/*
			 * REPORT LUNS moved no data: synthesize a CHECK
			 * CONDITION with ILLEGAL_REQUEST/0x20 (invalid
			 * command operation code).
			 */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
					 0x20, 0);
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scsi_done(scmd);
	return 0;
}
5940 
/**
 * _scsih_update_vphys_after_reset - update the Port's
 *			vphys_list after reset
 * @ioc: per adapter object
 *
 * Reconciles the per-port virtual phy (vSES) lists against the HBA phy
 * state read back from the firmware after a host reset.
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is confirmed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equal
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
						&ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirty.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
				 */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			if (found)
				break;
		}
	}
out:
	kfree(sas_iounit_pg0);
}
6137 
6138 /**
6139  * _scsih_get_port_table_after_reset - Construct temporary port table
6140  * @ioc: per adapter object
6141  * @port_table: address where port table needs to be constructed
6142  *
6143  * return number of HBA port entries available after reset.
6144  */
6145 static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table)6146 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6147 	struct hba_port *port_table)
6148 {
6149 	u16 sz, ioc_status;
6150 	int i, j;
6151 	Mpi2ConfigReply_t mpi_reply;
6152 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6153 	u16 attached_handle;
6154 	u64 attached_sas_addr;
6155 	u8 found = 0, port_count = 0, port_id;
6156 
6157 	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
6158 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6159 	if (!sas_iounit_pg0) {
6160 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6161 		    __FILE__, __LINE__, __func__);
6162 		return port_count;
6163 	}
6164 
6165 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6166 	    sas_iounit_pg0, sz)) != 0)
6167 		goto out;
6168 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6169 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6170 		goto out;
6171 	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6172 		found = 0;
6173 		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6174 		    MPI2_SAS_NEG_LINK_RATE_1_5)
6175 			continue;
6176 		attached_handle =
6177 		    le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6178 		if (_scsih_get_sas_address(
6179 		    ioc, attached_handle, &attached_sas_addr) != 0) {
6180 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6181 			    __FILE__, __LINE__, __func__);
6182 			continue;
6183 		}
6184 
6185 		for (j = 0; j < port_count; j++) {
6186 			port_id = sas_iounit_pg0->PhyData[i].Port;
6187 			if (port_table[j].port_id == port_id &&
6188 			    port_table[j].sas_address == attached_sas_addr) {
6189 				port_table[j].phy_mask |= (1 << i);
6190 				found = 1;
6191 				break;
6192 			}
6193 		}
6194 
6195 		if (found)
6196 			continue;
6197 
6198 		port_id = sas_iounit_pg0->PhyData[i].Port;
6199 		port_table[port_count].port_id = port_id;
6200 		port_table[port_count].phy_mask = (1 << i);
6201 		port_table[port_count].sas_address = attached_sas_addr;
6202 		port_count++;
6203 	}
6204 out:
6205 	kfree(sas_iounit_pg0);
6206 	return port_count;
6207 }
6208 
/* result ranking returned by _scsih_look_and_get_matched_port_entry() */
enum hba_port_matched_codes {
	NOT_MATCHED = 0,	/* no dirty port entry matched */
	MATCHED_WITH_ADDR_AND_PHYMASK,	/* address and full phy mask */
	MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,	/* address, partial mask, port id */
	MATCHED_WITH_ADDR_AND_SUBPHYMASK,	/* address and partial phy mask */
	MATCHED_WITH_ADDR,	/* address only (may be ambiguous) */
};
6216 
/**
 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
 *					from HBA port table
 * @ioc: per adapter object
 * @port_entry: hba port entry from temporary port table which needs to be
 *		searched for matched entry in the HBA port table
 * @matched_port_entry: save matched hba port entry here
 * @count: count of matched entries
 *
 * Matches are ranked; a weaker match never overwrites a stronger one
 * already found, and an exact address+phymask match ends the search.
 * @count is only written for address-only matches, where several
 * entries can tie.
 *
 * return type of matched entry found.
 */
static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_entry,
	struct hba_port **matched_port_entry, int *count)
{
	struct hba_port *port_table_entry, *matched_port = NULL;
	enum hba_port_matched_codes matched_code = NOT_MATCHED;
	int lcount = 0;
	*matched_port_entry = NULL;

	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
		/* only dirty (not yet reconciled) entries are candidates */
		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
			continue;

		/* exact match: same address and identical phy mask */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
			matched_port = port_table_entry;
			break;
		}

		/* same address, overlapping phy mask and same port id */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)
		    && (port_table_entry->port_id == port_entry->port_id)) {
			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
			matched_port = port_table_entry;
			continue;
		}

		/* same address and overlapping phy mask only */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
			matched_port = port_table_entry;
			continue;
		}

		/* address-only match; lcount tracks how many entries tie */
		if (port_table_entry->sas_address == port_entry->sas_address) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
				continue;
			matched_code = MATCHED_WITH_ADDR;
			matched_port = port_table_entry;
			lcount++;
		}
	}

	*matched_port_entry = matched_port;
	if (matched_code ==  MATCHED_WITH_ADDR)
		*count = lcount;
	return matched_code;
}
6284 
6285 /**
6286  * _scsih_del_phy_part_of_anther_port - remove phy if it
6287  *				is a part of anther port
6288  *@ioc: per adapter object
6289  *@port_table: port table after reset
6290  *@index: hba port entry index
6291  *@port_count: number of ports available after host reset
6292  *@offset: HBA phy bit offset
6293  *
6294  */
6295 static void
_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table,int index,u8 port_count,int offset)6296 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6297 	struct hba_port *port_table,
6298 	int index, u8 port_count, int offset)
6299 {
6300 	struct _sas_node *sas_node = &ioc->sas_hba;
6301 	u32 i, found = 0;
6302 
6303 	for (i = 0; i < port_count; i++) {
6304 		if (i == index)
6305 			continue;
6306 
6307 		if (port_table[i].phy_mask & (1 << offset)) {
6308 			mpt3sas_transport_del_phy_from_an_existing_port(
6309 			    ioc, sas_node, &sas_node->phy[offset]);
6310 			found = 1;
6311 			break;
6312 		}
6313 	}
6314 	if (!found)
6315 		port_table[index].phy_mask |= (1 << offset);
6316 }
6317 
6318 /**
6319  * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6320  *						right port
6321  *@ioc: per adapter object
6322  *@hba_port_entry: hba port table entry
6323  *@port_table: temporary port table
6324  *@index: hba port entry index
6325  *@port_count: number of ports available after host reset
6326  *
6327  */
6328 static void
_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * hba_port_entry,struct hba_port * port_table,int index,int port_count)6329 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6330 	struct hba_port *hba_port_entry, struct hba_port *port_table,
6331 	int index, int port_count)
6332 {
6333 	u32 phy_mask, offset = 0;
6334 	struct _sas_node *sas_node = &ioc->sas_hba;
6335 
6336 	phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6337 
6338 	for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6339 		if (phy_mask & (1 << offset)) {
6340 			if (!(port_table[index].phy_mask & (1 << offset))) {
6341 				_scsih_del_phy_part_of_anther_port(
6342 				    ioc, port_table, index, port_count,
6343 				    offset);
6344 				continue;
6345 			}
6346 			if (sas_node->phy[offset].phy_belongs_to_port)
6347 				mpt3sas_transport_del_phy_from_an_existing_port(
6348 				    ioc, sas_node, &sas_node->phy[offset]);
6349 			mpt3sas_transport_add_phy_to_an_existing_port(
6350 			    ioc, sas_node, &sas_node->phy[offset],
6351 			    hba_port_entry->sas_address,
6352 			    hba_port_entry);
6353 		}
6354 	}
6355 }
6356 
6357 /**
6358  * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6359  * @ioc: per adapter object
6360  *
6361  * Returns nothing.
6362  */
6363 static void
_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER * ioc)6364 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6365 {
6366 	struct hba_port *port, *port_next;
6367 	struct virtual_phy *vphy, *vphy_next;
6368 
6369 	list_for_each_entry_safe(port, port_next,
6370 	    &ioc->port_table_list, list) {
6371 		if (!port->vphys_mask)
6372 			continue;
6373 		list_for_each_entry_safe(vphy, vphy_next,
6374 		    &port->vphys_list, list) {
6375 			if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6376 				drsprintk(ioc, ioc_info(ioc,
6377 				    "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6378 				    vphy, port->port_id,
6379 				    vphy->phy_mask));
6380 				port->vphys_mask &= ~vphy->phy_mask;
6381 				list_del(&vphy->list);
6382 				kfree(vphy);
6383 			}
6384 		}
6385 		if (!port->vphys_mask && !port->sas_address)
6386 			port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6387 	}
6388 }
6389 
6390 /**
6391  * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6392  *					after host reset
6393  *@ioc: per adapter object
6394  *
6395  */
6396 static void
_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER * ioc)6397 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6398 {
6399 	struct hba_port *port, *port_next;
6400 
6401 	list_for_each_entry_safe(port, port_next,
6402 	    &ioc->port_table_list, list) {
6403 		if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6404 		    port->flags & HBA_PORT_FLAG_NEW_PORT)
6405 			continue;
6406 
6407 		drsprintk(ioc, ioc_info(ioc,
6408 		    "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6409 		    port, port->port_id, port->phy_mask));
6410 		list_del(&port->list);
6411 		kfree(port);
6412 	}
6413 }
6414 
6415 /**
6416  * _scsih_sas_port_refresh - Update HBA port table after host reset
6417  * @ioc: per adapter object
6418  */
6419 static void
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER * ioc)6420 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6421 {
6422 	u32 port_count = 0;
6423 	struct hba_port *port_table;
6424 	struct hba_port *port_table_entry;
6425 	struct hba_port *port_entry = NULL;
6426 	int i, j, count = 0, lcount = 0;
6427 	int ret;
6428 	u64 sas_addr;
6429 	u8 num_phys;
6430 
6431 	drsprintk(ioc, ioc_info(ioc,
6432 	    "updating ports for sas_host(0x%016llx)\n",
6433 	    (unsigned long long)ioc->sas_hba.sas_address));
6434 
6435 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6436 	if (!num_phys) {
6437 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6438 		    __FILE__, __LINE__, __func__);
6439 		return;
6440 	}
6441 
6442 	if (num_phys > ioc->sas_hba.nr_phys_allocated) {
6443 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6444 		   __FILE__, __LINE__, __func__);
6445 		return;
6446 	}
6447 	ioc->sas_hba.num_phys = num_phys;
6448 
6449 	port_table = kcalloc(ioc->sas_hba.num_phys,
6450 	    sizeof(struct hba_port), GFP_KERNEL);
6451 	if (!port_table)
6452 		return;
6453 
6454 	port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6455 	if (!port_count)
6456 		return;
6457 
6458 	drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6459 	for (j = 0; j < port_count; j++)
6460 		drsprintk(ioc, ioc_info(ioc,
6461 		    "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6462 		    port_table[j].port_id,
6463 		    port_table[j].phy_mask, port_table[j].sas_address));
6464 
6465 	list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6466 		port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6467 
6468 	drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6469 	port_table_entry = NULL;
6470 	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6471 		drsprintk(ioc, ioc_info(ioc,
6472 		    "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6473 		    port_table_entry->port_id,
6474 		    port_table_entry->phy_mask,
6475 		    port_table_entry->sas_address));
6476 	}
6477 
6478 	for (j = 0; j < port_count; j++) {
6479 		ret = _scsih_look_and_get_matched_port_entry(ioc,
6480 		    &port_table[j], &port_entry, &count);
6481 		if (!port_entry) {
6482 			drsprintk(ioc, ioc_info(ioc,
6483 			    "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6484 			    port_table[j].sas_address,
6485 			    port_table[j].port_id));
6486 			continue;
6487 		}
6488 
6489 		switch (ret) {
6490 		case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6491 		case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6492 			_scsih_add_or_del_phys_from_existing_port(ioc,
6493 			    port_entry, port_table, j, port_count);
6494 			break;
6495 		case MATCHED_WITH_ADDR:
6496 			sas_addr = port_table[j].sas_address;
6497 			for (i = 0; i < port_count; i++) {
6498 				if (port_table[i].sas_address == sas_addr)
6499 					lcount++;
6500 			}
6501 
6502 			if (count > 1 || lcount > 1)
6503 				port_entry = NULL;
6504 			else
6505 				_scsih_add_or_del_phys_from_existing_port(ioc,
6506 				    port_entry, port_table, j, port_count);
6507 		}
6508 
6509 		if (!port_entry)
6510 			continue;
6511 
6512 		if (port_entry->port_id != port_table[j].port_id)
6513 			port_entry->port_id = port_table[j].port_id;
6514 		port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6515 		port_entry->phy_mask = port_table[j].phy_mask;
6516 	}
6517 
6518 	port_table_entry = NULL;
6519 }
6520 
6521 /**
6522  * _scsih_alloc_vphy - allocate virtual_phy object
6523  * @ioc: per adapter object
6524  * @port_id: Port ID number
6525  * @phy_num: HBA Phy number
6526  *
6527  * Returns allocated virtual_phy object.
6528  */
6529 static struct virtual_phy *
_scsih_alloc_vphy(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 phy_num)6530 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6531 {
6532 	struct virtual_phy *vphy;
6533 	struct hba_port *port;
6534 
6535 	port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6536 	if (!port)
6537 		return NULL;
6538 
6539 	vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6540 	if (!vphy) {
6541 		vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6542 		if (!vphy)
6543 			return NULL;
6544 
6545 		if (!port->vphys_mask)
6546 			INIT_LIST_HEAD(&port->vphys_list);
6547 
6548 		/*
6549 		 * Enable bit corresponding to HBA phy number on its
6550 		 * parent hba_port object's vphys_mask field.
6551 		 */
6552 		port->vphys_mask |= (1 << phy_num);
6553 		vphy->phy_mask |= (1 << phy_num);
6554 
6555 		list_add_tail(&vphy->list, &port->vphys_list);
6556 
6557 		ioc_info(ioc,
6558 		    "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6559 		    vphy, port->port_id, phy_num);
6560 	}
6561 	return vphy;
6562 }
6563 
/**
 * _scsih_sas_host_refresh - refreshing sas host object contents
 * @ioc: per adapter object
 * Context: user
 *
 * During port enable, fw will send topology events for every device. Its
 * possible that the handles may change from the previous setting, so this
 * code keeping handles updating if changed.
 */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate, port_id;
	struct hba_port *port;
	Mpi2SasPhyPage0_t phy_pg0;

	dtmprintk(ioc,
		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
			   (u64)ioc->sas_hba.sas_address));

	/* SAS IO Unit page 0 carries per-phy state for every HBA phy. */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		/* Upper nibble of NegotiatedLinkRate is the current rate. */
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(
			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
		port_id = sas_iounit_pg0->PhyData[i].Port;
		/* Create an hba_port entry for a port id not seen before. */
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			/* Flag ports created during recovery as NEW so the
			 * dirty-port cleanup doesn't delete them.
			 */
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >=  MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__);
				goto out;
			}
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate a virtual_phy object for vSES device, if
			 * this vSES device is hot added.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		/*
		 * Add new HBA phys to STL if these new phys got added as part
		 * of HBA Firmware upgrade/downgrade operation.
		 */
		if (!ioc->sas_hba.phy[i].phy) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
							&phy_pg0, i))) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
					__FILE__, __LINE__, __func__);
				continue;
			}
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
				MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
					__FILE__, __LINE__, __func__);
				continue;
			}
			ioc->sas_hba.phy[i].phy_id = i;
			mpt3sas_transport_add_host_phy(ioc,
				&ioc->sas_hba.phy[i], phy_pg0,
				ioc->sas_hba.parent_dev);
			continue;
		}
		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		/* A phy with an attached device is linked at >= 1.5 Gbps. */
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
	/*
	 * Clear the phy details if this phy got disabled as part of
	 * HBA Firmware upgrade/downgrade operation.
	 */
	for (i = ioc->sas_hba.num_phys;
	     i < ioc->sas_hba.nr_phys_allocated; i++) {
		if (ioc->sas_hba.phy[i].phy &&
		    ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
		    SAS_LINK_RATE_1_5_GBPS)
			mpt3sas_transport_update_links(ioc,
				ioc->sas_hba.sas_address, 0, i,
				MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
	}
 out:
	kfree(sas_iounit_pg0);
}
6699 
/**
 * _scsih_sas_host_add - create sas host object
 * @ioc: per adapter object
 *
 * Creating host side data object, stored in ioc->sas_hba
 */
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2SasPhyPage0_t phy_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	u16 ioc_status;
	u16 sz;
	u8 device_missing_delay;
	u8 num_phys, port_id;
	struct hba_port *port;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* Allocate at least MPT_MAX_HBA_NUM_PHYS slots so a later firmware
	 * change exposing more phys doesn't overflow the phy array.
	 */
	ioc->sas_hba.nr_phys_allocated = max_t(u8,
	    MPT_MAX_HBA_NUM_PHYS, num_phys);
	ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!ioc->sas_hba.phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.num_phys = num_phys;

	/* sas_iounit page 0 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		/* Equivalent to goto out here: both pages are still NULL. */
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* sas_iounit page 1 */
	sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* Cache the device-missing delays; the "UNIT_16" flag means the
	 * timeout field is expressed in units of 16 seconds.
	 */
	ioc->io_missing_delay =
	    sas_iounit_pg1->IODeviceMissingDelay;
	device_missing_delay =
	    sas_iounit_pg1->ReportDeviceMissingDelay;
	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		ioc->device_missing_delay = (device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		ioc->device_missing_delay = device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
	/* Register every HBA phy with the SAS transport layer. */
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			goto out;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			goto out;
		}

		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);

		/* Create an hba_port entry for a port id not seen before. */
		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			   "hba_port entry: %p, port: %d is added to hba_port list\n",
			   port, port->port_id);
			list_add_tail(&port->list,
			    &ioc->port_table_list);
		}

		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if ((le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
		    (phy_pg0.NegotiatedLinkRate >> 4) >=
		    MPI2_SAS_NEG_LINK_RATE_1_5) {
			/*
			 * Allocate a virtual_phy object for vSES device.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		ioc->sas_hba.phy[i].phy_id = i;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
		    phy_pg0, ioc->sas_hba.parent_dev);
	}
	/* Fetch the controller's own device page for handle/address/encl. */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
		 ioc->sas_hba.handle,
		 (u64)ioc->sas_hba.sas_address,
		 ioc->sas_hba.num_phys);

	if (ioc->sas_hba.enclosure_handle) {
		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
		   ioc->sas_hba.enclosure_handle)))
			ioc->sas_hba.enclosure_logical_id =
			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
	}

 out:
	kfree(sas_iounit_pg1);
	kfree(sas_iounit_pg0);
}
6877 
6878 /**
6879  * _scsih_expander_add -  creating expander object
6880  * @ioc: per adapter object
6881  * @handle: expander handle
6882  *
6883  * Creating expander object, stored in ioc->sas_expander_list.
6884  *
6885  * Return: 0 for success, else error.
6886  */
6887 static int
_scsih_expander_add(struct MPT3SAS_ADAPTER * ioc,u16 handle)6888 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6889 {
6890 	struct _sas_node *sas_expander;
6891 	struct _enclosure_node *enclosure_dev;
6892 	Mpi2ConfigReply_t mpi_reply;
6893 	Mpi2ExpanderPage0_t expander_pg0;
6894 	Mpi2ExpanderPage1_t expander_pg1;
6895 	u32 ioc_status;
6896 	u16 parent_handle;
6897 	u64 sas_address, sas_address_parent = 0;
6898 	int i;
6899 	unsigned long flags;
6900 	struct _sas_port *mpt3sas_port = NULL;
6901 	u8 port_id;
6902 
6903 	int rc = 0;
6904 
6905 	if (!handle)
6906 		return -1;
6907 
6908 	if (ioc->shost_recovery || ioc->pci_error_recovery)
6909 		return -1;
6910 
6911 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6912 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6913 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6914 			__FILE__, __LINE__, __func__);
6915 		return -1;
6916 	}
6917 
6918 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6919 	    MPI2_IOCSTATUS_MASK;
6920 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6921 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6922 			__FILE__, __LINE__, __func__);
6923 		return -1;
6924 	}
6925 
6926 	/* handle out of order topology events */
6927 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6928 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6929 	    != 0) {
6930 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6931 			__FILE__, __LINE__, __func__);
6932 		return -1;
6933 	}
6934 
6935 	port_id = expander_pg0.PhysicalPort;
6936 	if (sas_address_parent != ioc->sas_hba.sas_address) {
6937 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
6938 		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6939 		    sas_address_parent,
6940 		    mpt3sas_get_port_by_id(ioc, port_id, 0));
6941 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6942 		if (!sas_expander) {
6943 			rc = _scsih_expander_add(ioc, parent_handle);
6944 			if (rc != 0)
6945 				return rc;
6946 		}
6947 	}
6948 
6949 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6950 	sas_address = le64_to_cpu(expander_pg0.SASAddress);
6951 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6952 	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6953 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6954 
6955 	if (sas_expander)
6956 		return 0;
6957 
6958 	sas_expander = kzalloc(sizeof(struct _sas_node),
6959 	    GFP_KERNEL);
6960 	if (!sas_expander) {
6961 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6962 			__FILE__, __LINE__, __func__);
6963 		return -1;
6964 	}
6965 
6966 	sas_expander->handle = handle;
6967 	sas_expander->num_phys = expander_pg0.NumPhys;
6968 	sas_expander->sas_address_parent = sas_address_parent;
6969 	sas_expander->sas_address = sas_address;
6970 	sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6971 	if (!sas_expander->port) {
6972 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6973 		    __FILE__, __LINE__, __func__);
6974 		rc = -1;
6975 		goto out_fail;
6976 	}
6977 
6978 	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6979 		 handle, parent_handle,
6980 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
6981 
6982 	if (!sas_expander->num_phys) {
6983 		rc = -1;
6984 		goto out_fail;
6985 	}
6986 	sas_expander->phy = kcalloc(sas_expander->num_phys,
6987 	    sizeof(struct _sas_phy), GFP_KERNEL);
6988 	if (!sas_expander->phy) {
6989 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6990 			__FILE__, __LINE__, __func__);
6991 		rc = -1;
6992 		goto out_fail;
6993 	}
6994 
6995 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
6996 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6997 	    sas_address_parent, sas_expander->port);
6998 	if (!mpt3sas_port) {
6999 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7000 			__FILE__, __LINE__, __func__);
7001 		rc = -1;
7002 		goto out_fail;
7003 	}
7004 	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
7005 	sas_expander->rphy = mpt3sas_port->rphy;
7006 
7007 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
7008 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
7009 		    &expander_pg1, i, handle))) {
7010 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
7011 				__FILE__, __LINE__, __func__);
7012 			rc = -1;
7013 			goto out_fail;
7014 		}
7015 		sas_expander->phy[i].handle = handle;
7016 		sas_expander->phy[i].phy_id = i;
7017 		sas_expander->phy[i].port =
7018 		    mpt3sas_get_port_by_id(ioc, port_id, 0);
7019 
7020 		if ((mpt3sas_transport_add_expander_phy(ioc,
7021 		    &sas_expander->phy[i], expander_pg1,
7022 		    sas_expander->parent_dev))) {
7023 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
7024 				__FILE__, __LINE__, __func__);
7025 			rc = -1;
7026 			goto out_fail;
7027 		}
7028 	}
7029 
7030 	if (sas_expander->enclosure_handle) {
7031 		enclosure_dev =
7032 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7033 						sas_expander->enclosure_handle);
7034 		if (enclosure_dev)
7035 			sas_expander->enclosure_logical_id =
7036 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7037 	}
7038 
7039 	_scsih_expander_node_add(ioc, sas_expander);
7040 	return 0;
7041 
7042  out_fail:
7043 
7044 	if (mpt3sas_port)
7045 		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
7046 		    sas_address_parent, sas_expander->port);
7047 	kfree(sas_expander);
7048 	return rc;
7049 }
7050 
7051 /**
7052  * mpt3sas_expander_remove - removing expander object
7053  * @ioc: per adapter object
7054  * @sas_address: expander sas_address
7055  * @port: hba port entry
7056  */
7057 void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)7058 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7059 	struct hba_port *port)
7060 {
7061 	struct _sas_node *sas_expander;
7062 	unsigned long flags;
7063 
7064 	if (ioc->shost_recovery)
7065 		return;
7066 
7067 	if (!port)
7068 		return;
7069 
7070 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
7071 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7072 	    sas_address, port);
7073 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7074 	if (sas_expander)
7075 		_scsih_expander_node_remove(ioc, sas_expander);
7076 }
7077 
7078 /**
7079  * _scsih_done -  internal SCSI_IO callback handler.
7080  * @ioc: per adapter object
7081  * @smid: system request message index
7082  * @msix_index: MSIX table index supplied by the OS
7083  * @reply: reply message frame(lower 32bit addr)
7084  *
7085  * Callback handler when sending internal generated SCSI_IO.
7086  * The callback index passed is `ioc->scsih_cb_idx`
7087  *
7088  * Return: 1 meaning mf should be freed from _base_interrupt
7089  *         0 means the mf is freed from this function.
7090  */
7091 static u8
_scsih_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)7092 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7093 {
7094 	MPI2DefaultReply_t *mpi_reply;
7095 
7096 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
7097 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7098 		return 1;
7099 	if (ioc->scsih_cmds.smid != smid)
7100 		return 1;
7101 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7102 	if (mpi_reply) {
7103 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
7104 		    mpi_reply->MsgLength*4);
7105 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7106 	}
7107 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7108 	complete(&ioc->scsih_cmds.done);
7109 	return 1;
7110 }
7111 
7112 
7113 
7114 
/* Upper bound on LUNs per target — usage not visible in this chunk; confirm. */
#define MPT3_MAX_LUNS (255)
7116 
7117 
7118 /**
7119  * _scsih_check_access_status - check access flags
7120  * @ioc: per adapter object
7121  * @sas_address: sas address
7122  * @handle: sas device handle
7123  * @access_status: errors returned during discovery of the device
7124  *
7125  * Return: 0 for success, else failure
7126  */
7127 static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,u16 handle,u8 access_status)7128 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7129 	u16 handle, u8 access_status)
7130 {
7131 	u8 rc = 1;
7132 	char *desc = NULL;
7133 
7134 	switch (access_status) {
7135 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7136 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7137 		rc = 0;
7138 		break;
7139 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7140 		desc = "sata capability failed";
7141 		break;
7142 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7143 		desc = "sata affiliation conflict";
7144 		break;
7145 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7146 		desc = "route not addressable";
7147 		break;
7148 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7149 		desc = "smp error not addressable";
7150 		break;
7151 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7152 		desc = "device blocked";
7153 		break;
7154 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7155 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7156 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7157 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7158 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7159 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7160 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7161 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7162 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7163 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7164 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7165 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7166 		desc = "sata initialization failed";
7167 		break;
7168 	default:
7169 		desc = "unknown";
7170 		break;
7171 	}
7172 
7173 	if (!rc)
7174 		return 0;
7175 
7176 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7177 		desc, (u64)sas_address, handle);
7178 	return rc;
7179 }
7180 
7181 /**
7182  * _scsih_check_device - checking device responsiveness
7183  * @ioc: per adapter object
7184  * @parent_sas_address: sas address of parent expander or sas host
7185  * @handle: attached device handle
7186  * @phy_number: phy number
7187  * @link_rate: new link rate
7188  */
7189 static void
_scsih_check_device(struct MPT3SAS_ADAPTER * ioc,u64 parent_sas_address,u16 handle,u8 phy_number,u8 link_rate)7190 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7191 	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7192 {
7193 	Mpi2ConfigReply_t mpi_reply;
7194 	Mpi2SasDevicePage0_t sas_device_pg0;
7195 	struct _sas_device *sas_device = NULL;
7196 	struct _enclosure_node *enclosure_dev = NULL;
7197 	u32 ioc_status;
7198 	unsigned long flags;
7199 	u64 sas_address;
7200 	struct scsi_target *starget;
7201 	struct MPT3SAS_TARGET *sas_target_priv_data;
7202 	u32 device_info;
7203 	struct hba_port *port;
7204 
7205 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7206 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7207 		return;
7208 
7209 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7210 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7211 		return;
7212 
7213 	/* wide port handling ~ we need only handle device once for the phy that
7214 	 * is matched in sas device page zero
7215 	 */
7216 	if (phy_number != sas_device_pg0.PhyNum)
7217 		return;
7218 
7219 	/* check if this is end device */
7220 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7221 	if (!(_scsih_is_end_device(device_info)))
7222 		return;
7223 
7224 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7225 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7226 	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7227 	if (!port)
7228 		goto out_unlock;
7229 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7230 	    sas_address, port);
7231 
7232 	if (!sas_device)
7233 		goto out_unlock;
7234 
7235 	if (unlikely(sas_device->handle != handle)) {
7236 		starget = sas_device->starget;
7237 		sas_target_priv_data = starget->hostdata;
7238 		starget_printk(KERN_INFO, starget,
7239 			"handle changed from(0x%04x) to (0x%04x)!!!\n",
7240 			sas_device->handle, handle);
7241 		sas_target_priv_data->handle = handle;
7242 		sas_device->handle = handle;
7243 		if (le16_to_cpu(sas_device_pg0.Flags) &
7244 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7245 			sas_device->enclosure_level =
7246 				sas_device_pg0.EnclosureLevel;
7247 			memcpy(sas_device->connector_name,
7248 				sas_device_pg0.ConnectorName, 4);
7249 			sas_device->connector_name[4] = '\0';
7250 		} else {
7251 			sas_device->enclosure_level = 0;
7252 			sas_device->connector_name[0] = '\0';
7253 		}
7254 
7255 		sas_device->enclosure_handle =
7256 				le16_to_cpu(sas_device_pg0.EnclosureHandle);
7257 		sas_device->is_chassis_slot_valid = 0;
7258 		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7259 						sas_device->enclosure_handle);
7260 		if (enclosure_dev) {
7261 			sas_device->enclosure_logical_id =
7262 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7263 			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7264 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7265 				sas_device->is_chassis_slot_valid = 1;
7266 				sas_device->chassis_slot =
7267 					enclosure_dev->pg0.ChassisSlot;
7268 			}
7269 		}
7270 	}
7271 
7272 	/* check if device is present */
7273 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
7274 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7275 		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7276 			handle);
7277 		goto out_unlock;
7278 	}
7279 
7280 	/* check if there were any issues with discovery */
7281 	if (_scsih_check_access_status(ioc, sas_address, handle,
7282 	    sas_device_pg0.AccessStatus))
7283 		goto out_unlock;
7284 
7285 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7286 	_scsih_ublock_io_device(ioc, sas_address, port);
7287 
7288 	if (sas_device)
7289 		sas_device_put(sas_device);
7290 	return;
7291 
7292 out_unlock:
7293 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7294 	if (sas_device)
7295 		sas_device_put(sas_device);
7296 }
7297 
7298 /**
7299  * _scsih_add_device -  creating sas device object
7300  * @ioc: per adapter object
7301  * @handle: sas device handle
7302  * @phy_num: phy number end device attached to
7303  * @is_pd: is this hidden raid component
7304  *
7305  * Creating end device object, stored in ioc->sas_device_list.
7306  *
7307  * Return: 0 for success, non-zero for failure.
7308  */
7309 static int
_scsih_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle,u8 phy_num,u8 is_pd)7310 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7311 	u8 is_pd)
7312 {
7313 	Mpi2ConfigReply_t mpi_reply;
7314 	Mpi2SasDevicePage0_t sas_device_pg0;
7315 	struct _sas_device *sas_device;
7316 	struct _enclosure_node *enclosure_dev = NULL;
7317 	u32 ioc_status;
7318 	u64 sas_address;
7319 	u32 device_info;
7320 	u8 port_id;
7321 
7322 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7323 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7324 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7325 			__FILE__, __LINE__, __func__);
7326 		return -1;
7327 	}
7328 
7329 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7330 	    MPI2_IOCSTATUS_MASK;
7331 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7332 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7333 			__FILE__, __LINE__, __func__);
7334 		return -1;
7335 	}
7336 
7337 	/* check if this is end device */
7338 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7339 	if (!(_scsih_is_end_device(device_info)))
7340 		return -1;
7341 	set_bit(handle, ioc->pend_os_device_add);
7342 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7343 
7344 	/* check if device is present */
7345 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
7346 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7347 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7348 			handle);
7349 		return -1;
7350 	}
7351 
7352 	/* check if there were any issues with discovery */
7353 	if (_scsih_check_access_status(ioc, sas_address, handle,
7354 	    sas_device_pg0.AccessStatus))
7355 		return -1;
7356 
7357 	port_id = sas_device_pg0.PhysicalPort;
7358 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
7359 	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7360 	if (sas_device) {
7361 		clear_bit(handle, ioc->pend_os_device_add);
7362 		sas_device_put(sas_device);
7363 		return -1;
7364 	}
7365 
7366 	if (sas_device_pg0.EnclosureHandle) {
7367 		enclosure_dev =
7368 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7369 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
7370 		if (enclosure_dev == NULL)
7371 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7372 				 sas_device_pg0.EnclosureHandle);
7373 	}
7374 
7375 	sas_device = kzalloc(sizeof(struct _sas_device),
7376 	    GFP_KERNEL);
7377 	if (!sas_device) {
7378 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7379 			__FILE__, __LINE__, __func__);
7380 		return 0;
7381 	}
7382 
7383 	kref_init(&sas_device->refcount);
7384 	sas_device->handle = handle;
7385 	if (_scsih_get_sas_address(ioc,
7386 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
7387 	    &sas_device->sas_address_parent) != 0)
7388 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7389 			__FILE__, __LINE__, __func__);
7390 	sas_device->enclosure_handle =
7391 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
7392 	if (sas_device->enclosure_handle != 0)
7393 		sas_device->slot =
7394 		    le16_to_cpu(sas_device_pg0.Slot);
7395 	sas_device->device_info = device_info;
7396 	sas_device->sas_address = sas_address;
7397 	sas_device->phy = sas_device_pg0.PhyNum;
7398 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7399 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7400 	sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7401 	if (!sas_device->port) {
7402 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7403 		    __FILE__, __LINE__, __func__);
7404 		goto out;
7405 	}
7406 
7407 	if (le16_to_cpu(sas_device_pg0.Flags)
7408 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7409 		sas_device->enclosure_level =
7410 			sas_device_pg0.EnclosureLevel;
7411 		memcpy(sas_device->connector_name,
7412 			sas_device_pg0.ConnectorName, 4);
7413 		sas_device->connector_name[4] = '\0';
7414 	} else {
7415 		sas_device->enclosure_level = 0;
7416 		sas_device->connector_name[0] = '\0';
7417 	}
7418 	/* get enclosure_logical_id & chassis_slot*/
7419 	sas_device->is_chassis_slot_valid = 0;
7420 	if (enclosure_dev) {
7421 		sas_device->enclosure_logical_id =
7422 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7423 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7424 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7425 			sas_device->is_chassis_slot_valid = 1;
7426 			sas_device->chassis_slot =
7427 					enclosure_dev->pg0.ChassisSlot;
7428 		}
7429 	}
7430 
7431 	/* get device name */
7432 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7433 	sas_device->port_type = sas_device_pg0.MaxPortConnections;
7434 	ioc_info(ioc,
7435 	    "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
7436 	    handle, sas_device->sas_address, sas_device->port_type);
7437 
7438 	if (ioc->wait_for_discovery_to_complete)
7439 		_scsih_sas_device_init_add(ioc, sas_device);
7440 	else
7441 		_scsih_sas_device_add(ioc, sas_device);
7442 
7443 out:
7444 	sas_device_put(sas_device);
7445 	return 0;
7446 }
7447 
7448 /**
7449  * _scsih_remove_device -  removing sas device object
7450  * @ioc: per adapter object
7451  * @sas_device: the sas_device object
7452  */
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* IBM-branded HBAs: switch off the predictive-failure LED before
	 * the device goes away.
	 */
	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
	     (sas_device->pfa_led_on)) {
		_scsih_turn_off_pfa_led(ioc, sas_device);
		sas_device->pfa_led_on = 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Mark the target deleted and invalidate its handle before tearing
	 * down the transport port, so no new I/O is issued against it.
	 */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device->sas_address,
		    sas_device->port);
		sas_target_priv_data->handle =
		     MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* hidden RAID components are not registered with the transport layer */
	if (!ioc->hide_drives)
		mpt3sas_transport_port_remove(ioc,
		    sas_device->sas_address,
		    sas_device->sas_address_parent,
		    sas_device->port);

	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));
	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));
}
7500 
7501 /**
7502  * _scsih_sas_topology_change_event_debug - debug for topology event
7503  * @ioc: per adapter object
7504  * @event_data: event data payload
7505  * Context: user.
7506  */
7507 static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)7508 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7509 	Mpi2EventDataSasTopologyChangeList_t *event_data)
7510 {
7511 	int i;
7512 	u16 handle;
7513 	u16 reason_code;
7514 	u8 phy_number;
7515 	char *status_str = NULL;
7516 	u8 link_rate, prev_link_rate;
7517 
7518 	switch (event_data->ExpStatus) {
7519 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7520 		status_str = "add";
7521 		break;
7522 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7523 		status_str = "remove";
7524 		break;
7525 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7526 	case 0:
7527 		status_str =  "responding";
7528 		break;
7529 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7530 		status_str = "remove delay";
7531 		break;
7532 	default:
7533 		status_str = "unknown status";
7534 		break;
7535 	}
7536 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7537 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7538 	    "start_phy(%02d), count(%d)\n",
7539 	    le16_to_cpu(event_data->ExpanderDevHandle),
7540 	    le16_to_cpu(event_data->EnclosureHandle),
7541 	    event_data->StartPhyNum, event_data->NumEntries);
7542 	for (i = 0; i < event_data->NumEntries; i++) {
7543 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7544 		if (!handle)
7545 			continue;
7546 		phy_number = event_data->StartPhyNum + i;
7547 		reason_code = event_data->PHY[i].PhyStatus &
7548 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
7549 		switch (reason_code) {
7550 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7551 			status_str = "target add";
7552 			break;
7553 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7554 			status_str = "target remove";
7555 			break;
7556 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7557 			status_str = "delay target remove";
7558 			break;
7559 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7560 			status_str = "link rate change";
7561 			break;
7562 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7563 			status_str = "target responding";
7564 			break;
7565 		default:
7566 			status_str = "unknown";
7567 			break;
7568 		}
7569 		link_rate = event_data->PHY[i].LinkRate >> 4;
7570 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7571 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7572 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7573 		    handle, status_str, link_rate, prev_link_rate);
7574 
7575 	}
7576 }
7577 
7578 /**
7579  * _scsih_sas_topology_change_event - handle topology changes
7580  * @ioc: per adapter object
7581  * @fw_event: The fw_event_work object
7582  * Context: user.
7583  *
7584  */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	struct hba_port *port;
	Mpi2EventDataSasTopologyChangeList_t *event_data =
		(Mpi2EventDataSasTopologyChangeList_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);

	/* nothing to do while the controller is resetting or going away */
	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	/* Resolve the parent (expander or direct-attached HBA phy) under the
	 * node lock; when it is an expander, its cached port overrides the
	 * one derived from the event above.
	 */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
		port = sas_expander->port;
	} else if (parent_handle < ioc->sas_hba.num_phys) {
		sas_address = ioc->sas_hba.sas_address;
		max_phys = ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* re-check abort conditions on every iteration: these flags
		 * may be set asynchronously while we process entries
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring expander event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		/* vacant phys are skipped except for removals */
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
				continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address, handle,
			    phy_number, link_rate);

			/* falls through to the add path only when the OS-level
			 * add for this handle is still pending
			 */
			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			fallthrough;

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);
			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address, port);

	return 0;
}
7713 
7714 /**
7715  * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
7717  * @event_data: event data payload
7718  * Context: user.
7719  */
7720 static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasDeviceStatusChange_t * event_data)7721 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7722 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
7723 {
7724 	char *reason_str = NULL;
7725 
7726 	switch (event_data->ReasonCode) {
7727 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7728 		reason_str = "smart data";
7729 		break;
7730 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7731 		reason_str = "unsupported device discovered";
7732 		break;
7733 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7734 		reason_str = "internal device reset";
7735 		break;
7736 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7737 		reason_str = "internal task abort";
7738 		break;
7739 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7740 		reason_str = "internal task abort set";
7741 		break;
7742 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7743 		reason_str = "internal clear task set";
7744 		break;
7745 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7746 		reason_str = "internal query task";
7747 		break;
7748 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7749 		reason_str = "sata init failure";
7750 		break;
7751 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7752 		reason_str = "internal device reset complete";
7753 		break;
7754 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7755 		reason_str = "internal task abort complete";
7756 		break;
7757 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7758 		reason_str = "internal async notification";
7759 		break;
7760 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7761 		reason_str = "expander reduced functionality";
7762 		break;
7763 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7764 		reason_str = "expander reduced functionality complete";
7765 		break;
7766 	default:
7767 		reason_str = "unknown reason";
7768 		break;
7769 	}
7770 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7771 		 reason_str, le16_to_cpu(event_data->DevHandle),
7772 		 (u64)le64_to_cpu(event_data->SASAddress),
7773 		 le16_to_cpu(event_data->TaskTag));
7774 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7775 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7776 			event_data->ASC, event_data->ASCQ);
7777 	pr_cont("\n");
7778 }
7779 
7780 /**
7781  * _scsih_sas_device_status_change_event - handle device status change
7782  * @ioc: per adapter object
 * @event_data: event data payload
7784  * Context: user.
7785  */
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _sas_device *sas_device;
	u64 sas_address;
	unsigned long flags;

	/* In MPI Revision K (0xC), the internal device reset complete was
	 * implemented, so avoid setting tm_busy flag for older firmware.
	 */
	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
		return;

	/* only the internal-device-reset start/complete pair is of interest;
	 * all other reason codes are ignored here
	 */
	if (event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	   event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	/* lock held across the lookup and the tm_busy update; the reference
	 * taken by the lookup is dropped at "out" before unlocking
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(event_data->SASAddress);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address,
	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));

	if (!sas_device || !sas_device->starget)
		goto out;

	target_priv_data = sas_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* tm_busy gates I/O while an internal device reset is in flight */
	if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		ioc_info(ioc,
		    "%s tm_busy flag for handle(0x%04x)\n",
		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
		    target_priv_data->handle);

out:
	if (sas_device)
		sas_device_put(sas_device);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
7838 
7839 
7840 /**
7841  * _scsih_check_pcie_access_status - check access flags
7842  * @ioc: per adapter object
7843  * @wwid: wwid
7844  * @handle: sas device handle
7845  * @access_status: errors returned during discovery of the device
7846  *
7847  * Return: 0 for success, else failure
7848  */
7849 static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER * ioc,u64 wwid,u16 handle,u8 access_status)7850 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7851 	u16 handle, u8 access_status)
7852 {
7853 	u8 rc = 1;
7854 	char *desc = NULL;
7855 
7856 	switch (access_status) {
7857 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7858 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7859 		rc = 0;
7860 		break;
7861 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7862 		desc = "PCIe device capability failed";
7863 		break;
7864 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7865 		desc = "PCIe device blocked";
7866 		ioc_info(ioc,
7867 		    "Device with Access Status (%s): wwid(0x%016llx), "
7868 		    "handle(0x%04x)\n ll only be added to the internal list",
7869 		    desc, (u64)wwid, handle);
7870 		rc = 0;
7871 		break;
7872 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7873 		desc = "PCIe device mem space access failed";
7874 		break;
7875 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7876 		desc = "PCIe device unsupported";
7877 		break;
7878 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7879 		desc = "PCIe device MSIx Required";
7880 		break;
7881 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7882 		desc = "PCIe device init fail max";
7883 		break;
7884 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7885 		desc = "PCIe device status unknown";
7886 		break;
7887 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7888 		desc = "nvme ready timeout";
7889 		break;
7890 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7891 		desc = "nvme device configuration unsupported";
7892 		break;
7893 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7894 		desc = "nvme identify failed";
7895 		break;
7896 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7897 		desc = "nvme qconfig failed";
7898 		break;
7899 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7900 		desc = "nvme qcreation failed";
7901 		break;
7902 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7903 		desc = "nvme eventcfg failed";
7904 		break;
7905 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7906 		desc = "nvme get feature stat failed";
7907 		break;
7908 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7909 		desc = "nvme idle timeout";
7910 		break;
7911 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7912 		desc = "nvme failure status";
7913 		break;
7914 	default:
7915 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7916 			access_status, (u64)wwid, handle);
7917 		return rc;
7918 	}
7919 
7920 	if (!rc)
7921 		return rc;
7922 
7923 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7924 		 desc, (u64)wwid, handle);
7925 	return rc;
7926 }
7927 
7928 /**
7929  * _scsih_pcie_device_remove_from_sml -  removing pcie device
7930  * from SML and free up associated memory
7931  * @ioc: per adapter object
7932  * @pcie_device: the pcie_device object
7933  */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Mark the target deleted and invalidate its handle before the SML
	 * teardown below, so no new I/O is issued against it.
	 */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* blocked devices were never exposed to the SML, so there is no
	 * scsi_target to remove for them
	 */
	if (pcie_device->starget && (pcie_device->access_status !=
				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* free the cached serial number; the device object itself is
	 * released by its refcount machinery
	 */
	kfree(pcie_device->serial_number);
}
7997 
7998 
7999 /**
8000  * _scsih_pcie_check_device - checking device responsiveness
8001  * @ioc: per adapter object
8002  * @handle: attached device handle
8003  */
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	u32 ioc_status;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;

	/* Read PCIe Device Page 0 for this handle; bail out silently if the
	 * firmware no longer knows about the device.
	 */
	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
		return;

	/* lookup by WWID takes a reference; dropped on every exit path */
	wwid = le64_to_cpu(pcie_device_pg0.WWID);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return;
	}

	/* Firmware may reassign the handle (e.g. across resets);
	 * resynchronize our cached handle and enclosure/connector info.
	 */
	if (unlikely(pcie_device->handle != handle)) {
		starget = pcie_device->starget;
		sas_target_priv_data = starget->hostdata;
		pcie_device->access_status = pcie_device_pg0.AccessStatus;
		starget_printk(KERN_INFO, starget,
		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
		    pcie_device->handle, handle);
		sas_target_priv_data->handle = handle;
		pcie_device->handle = handle;

		if (le32_to_cpu(pcie_device_pg0.Flags) &
		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
			pcie_device->enclosure_level =
			    pcie_device_pg0.EnclosureLevel;
			memcpy(&pcie_device->connector_name[0],
			    &pcie_device_pg0.ConnectorName[0], 4);
		} else {
			pcie_device->enclosure_level = 0;
			pcie_device->connector_name[0] = '\0';
		}
	}

	/* check if device is present */
	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
			 handle);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
	    pcie_device_pg0.AccessStatus)) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	/* Device is responsive: drop lock and reference, then unblock I/O */
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	pcie_device_put(pcie_device);

	_scsih_ublock_io_device(ioc, wwid, NULL);

	return;
}
8086 
8087 /**
8088  * _scsih_pcie_add_device -  creating pcie device object
8089  * @ioc: per adapter object
8090  * @handle: pcie device handle
8091  *
8092  * Creating end device object, stored in ioc->pcie_device_list.
8093  *
8094  * Return: 1 means queue the event later, 0 means complete the event
8095  */
8096 static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)8097 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8098 {
8099 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8100 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
8101 	Mpi2ConfigReply_t mpi_reply;
8102 	struct _pcie_device *pcie_device;
8103 	struct _enclosure_node *enclosure_dev;
8104 	u32 ioc_status;
8105 	u64 wwid;
8106 
8107 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8108 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8109 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8110 			__FILE__, __LINE__, __func__);
8111 		return 0;
8112 	}
8113 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8114 	    MPI2_IOCSTATUS_MASK;
8115 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8116 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8117 			__FILE__, __LINE__, __func__);
8118 		return 0;
8119 	}
8120 
8121 	set_bit(handle, ioc->pend_os_device_add);
8122 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
8123 
8124 	/* check if device is present */
8125 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8126 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8127 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8128 			handle);
8129 		return 0;
8130 	}
8131 
8132 	/* check if there were any issues with discovery */
8133 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8134 	    pcie_device_pg0.AccessStatus))
8135 		return 0;
8136 
8137 	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8138 	    (pcie_device_pg0.DeviceInfo))))
8139 		return 0;
8140 
8141 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8142 	if (pcie_device) {
8143 		clear_bit(handle, ioc->pend_os_device_add);
8144 		pcie_device_put(pcie_device);
8145 		return 0;
8146 	}
8147 
8148 	/* PCIe Device Page 2 contains read-only information about a
8149 	 * specific NVMe device; therefore, this page is only
8150 	 * valid for NVMe devices and skip for pcie devices of type scsi.
8151 	 */
8152 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
8153 		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8154 		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8155 		    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8156 		    handle)) {
8157 			ioc_err(ioc,
8158 			    "failure at %s:%d/%s()!\n", __FILE__,
8159 			    __LINE__, __func__);
8160 			return 0;
8161 		}
8162 
8163 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8164 					MPI2_IOCSTATUS_MASK;
8165 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8166 			ioc_err(ioc,
8167 			    "failure at %s:%d/%s()!\n", __FILE__,
8168 			    __LINE__, __func__);
8169 			return 0;
8170 		}
8171 	}
8172 
8173 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8174 	if (!pcie_device) {
8175 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8176 			__FILE__, __LINE__, __func__);
8177 		return 0;
8178 	}
8179 
8180 	kref_init(&pcie_device->refcount);
8181 	pcie_device->id = ioc->pcie_target_id++;
8182 	pcie_device->channel = PCIE_CHANNEL;
8183 	pcie_device->handle = handle;
8184 	pcie_device->access_status = pcie_device_pg0.AccessStatus;
8185 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8186 	pcie_device->wwid = wwid;
8187 	pcie_device->port_num = pcie_device_pg0.PortNum;
8188 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8189 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8190 
8191 	pcie_device->enclosure_handle =
8192 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8193 	if (pcie_device->enclosure_handle != 0)
8194 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8195 
8196 	if (le32_to_cpu(pcie_device_pg0.Flags) &
8197 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8198 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8199 		memcpy(&pcie_device->connector_name[0],
8200 		    &pcie_device_pg0.ConnectorName[0], 4);
8201 	} else {
8202 		pcie_device->enclosure_level = 0;
8203 		pcie_device->connector_name[0] = '\0';
8204 	}
8205 
8206 	/* get enclosure_logical_id */
8207 	if (pcie_device->enclosure_handle) {
8208 		enclosure_dev =
8209 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8210 						pcie_device->enclosure_handle);
8211 		if (enclosure_dev)
8212 			pcie_device->enclosure_logical_id =
8213 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8214 	}
8215 	/* TODO -- Add device name once FW supports it */
8216 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
8217 	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8218 		pcie_device->nvme_mdts =
8219 		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8220 		pcie_device->shutdown_latency =
8221 			le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8222 		/*
8223 		 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8224 		 * if drive's RTD3 Entry Latency is greater then IOC's
8225 		 * max_shutdown_latency.
8226 		 */
8227 		if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8228 			ioc->max_shutdown_latency =
8229 				pcie_device->shutdown_latency;
8230 		if (pcie_device_pg2.ControllerResetTO)
8231 			pcie_device->reset_timeout =
8232 			    pcie_device_pg2.ControllerResetTO;
8233 		else
8234 			pcie_device->reset_timeout = 30;
8235 	} else
8236 		pcie_device->reset_timeout = 30;
8237 
8238 	if (ioc->wait_for_discovery_to_complete)
8239 		_scsih_pcie_device_init_add(ioc, pcie_device);
8240 	else
8241 		_scsih_pcie_device_add(ioc, pcie_device);
8242 
8243 	pcie_device_put(pcie_device);
8244 	return 0;
8245 }
8246 
8247 /**
8248  * _scsih_pcie_topology_change_event_debug - debug for topology
8249  * event
8250  * @ioc: per adapter object
8251  * @event_data: event data payload
8252  * Context: user.
8253  */
8254 static void
_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)8255 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8256 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8257 {
8258 	int i;
8259 	u16 handle;
8260 	u16 reason_code;
8261 	u8 port_number;
8262 	char *status_str = NULL;
8263 	u8 link_rate, prev_link_rate;
8264 
8265 	switch (event_data->SwitchStatus) {
8266 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8267 		status_str = "add";
8268 		break;
8269 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8270 		status_str = "remove";
8271 		break;
8272 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8273 	case 0:
8274 		status_str =  "responding";
8275 		break;
8276 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8277 		status_str = "remove delay";
8278 		break;
8279 	default:
8280 		status_str = "unknown status";
8281 		break;
8282 	}
8283 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8284 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8285 		"start_port(%02d), count(%d)\n",
8286 		le16_to_cpu(event_data->SwitchDevHandle),
8287 		le16_to_cpu(event_data->EnclosureHandle),
8288 		event_data->StartPortNum, event_data->NumEntries);
8289 	for (i = 0; i < event_data->NumEntries; i++) {
8290 		handle =
8291 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8292 		if (!handle)
8293 			continue;
8294 		port_number = event_data->StartPortNum + i;
8295 		reason_code = event_data->PortEntry[i].PortStatus;
8296 		switch (reason_code) {
8297 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8298 			status_str = "target add";
8299 			break;
8300 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8301 			status_str = "target remove";
8302 			break;
8303 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8304 			status_str = "delay target remove";
8305 			break;
8306 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8307 			status_str = "link rate change";
8308 			break;
8309 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8310 			status_str = "target responding";
8311 			break;
8312 		default:
8313 			status_str = "unknown";
8314 			break;
8315 		}
8316 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
8317 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8318 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8319 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8320 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8321 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
8322 			handle, status_str, link_rate, prev_link_rate);
8323 	}
8324 }
8325 
8326 /**
8327  * _scsih_pcie_topology_change_event - handle PCIe topology
8328  *  changes
8329  * @ioc: per adapter object
8330  * @fw_event: The fw_event_work object
8331  * Context: user.
8332  *
8333  */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* nothing to do while the host is resetting or being removed */
	if (ioc->shost_recovery || ioc->remove_host ||
		ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* re-check per entry: the event can be marked ignored or
		 * the host torn down while this loop is running
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			/* ignore no-op rate changes and links below 2.5GT/s */
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime.  Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			/* rewrite this entry as a device-add and fall through */
			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}
8439 
8440 /**
8441  * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
8443  * @event_data: event data payload
8444  * Context: user.
8445  */
8446 static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeDeviceStatusChange_t * event_data)8447 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8448 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8449 {
8450 	char *reason_str = NULL;
8451 
8452 	switch (event_data->ReasonCode) {
8453 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8454 		reason_str = "smart data";
8455 		break;
8456 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8457 		reason_str = "unsupported device discovered";
8458 		break;
8459 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8460 		reason_str = "internal device reset";
8461 		break;
8462 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8463 		reason_str = "internal task abort";
8464 		break;
8465 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8466 		reason_str = "internal task abort set";
8467 		break;
8468 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8469 		reason_str = "internal clear task set";
8470 		break;
8471 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8472 		reason_str = "internal query task";
8473 		break;
8474 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8475 		reason_str = "device init failure";
8476 		break;
8477 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8478 		reason_str = "internal device reset complete";
8479 		break;
8480 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8481 		reason_str = "internal task abort complete";
8482 		break;
8483 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8484 		reason_str = "internal async notification";
8485 		break;
8486 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8487 		reason_str = "pcie hot reset failed";
8488 		break;
8489 	default:
8490 		reason_str = "unknown reason";
8491 		break;
8492 	}
8493 
8494 	ioc_info(ioc, "PCIE device status change: (%s)\n"
8495 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8496 		 reason_str, le16_to_cpu(event_data->DevHandle),
8497 		 (u64)le64_to_cpu(event_data->WWID),
8498 		 le16_to_cpu(event_data->TaskTag));
8499 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8500 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8501 			event_data->ASC, event_data->ASCQ);
8502 	pr_cont("\n");
8503 }
8504 
8505 /**
8506  * _scsih_pcie_device_status_change_event - handle device status
8507  * change
8508  * @ioc: per adapter object
8509  * @fw_event: The fw_event_work object
8510  * Context: user.
8511  */
8512 static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8513 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8514 	struct fw_event_work *fw_event)
8515 {
8516 	struct MPT3SAS_TARGET *target_priv_data;
8517 	struct _pcie_device *pcie_device;
8518 	u64 wwid;
8519 	unsigned long flags;
8520 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8521 		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8522 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8523 		_scsih_pcie_device_status_change_event_debug(ioc,
8524 			event_data);
8525 
8526 	if (event_data->ReasonCode !=
8527 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8528 		event_data->ReasonCode !=
8529 		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8530 		return;
8531 
8532 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8533 	wwid = le64_to_cpu(event_data->WWID);
8534 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8535 
8536 	if (!pcie_device || !pcie_device->starget)
8537 		goto out;
8538 
8539 	target_priv_data = pcie_device->starget->hostdata;
8540 	if (!target_priv_data)
8541 		goto out;
8542 
8543 	if (event_data->ReasonCode ==
8544 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8545 		target_priv_data->tm_busy = 1;
8546 	else
8547 		target_priv_data->tm_busy = 0;
8548 out:
8549 	if (pcie_device)
8550 		pcie_device_put(pcie_device);
8551 
8552 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8553 }
8554 
8555 /**
8556  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8557  * event
8558  * @ioc: per adapter object
8559  * @event_data: event data payload
8560  * Context: user.
8561  */
8562 static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasEnclDevStatusChange_t * event_data)8563 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8564 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8565 {
8566 	char *reason_str = NULL;
8567 
8568 	switch (event_data->ReasonCode) {
8569 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8570 		reason_str = "enclosure add";
8571 		break;
8572 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8573 		reason_str = "enclosure remove";
8574 		break;
8575 	default:
8576 		reason_str = "unknown reason";
8577 		break;
8578 	}
8579 
8580 	ioc_info(ioc, "enclosure status change: (%s)\n"
8581 		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8582 		 reason_str,
8583 		 le16_to_cpu(event_data->EnclosureHandle),
8584 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8585 		 le16_to_cpu(event_data->StartSlot));
8586 }
8587 
8588 /**
8589  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8590  * @ioc: per adapter object
8591  * @fw_event: The fw_event_work object
8592  * Context: user.
8593  */
8594 static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8595 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8596 	struct fw_event_work *fw_event)
8597 {
8598 	Mpi2ConfigReply_t mpi_reply;
8599 	struct _enclosure_node *enclosure_dev = NULL;
8600 	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8601 		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8602 	int rc;
8603 	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8604 
8605 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8606 		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8607 		     (Mpi2EventDataSasEnclDevStatusChange_t *)
8608 		     fw_event->event_data);
8609 	if (ioc->shost_recovery)
8610 		return;
8611 
8612 	if (enclosure_handle)
8613 		enclosure_dev =
8614 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8615 						enclosure_handle);
8616 	switch (event_data->ReasonCode) {
8617 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8618 		if (!enclosure_dev) {
8619 			enclosure_dev =
8620 				kzalloc(sizeof(struct _enclosure_node),
8621 					GFP_KERNEL);
8622 			if (!enclosure_dev) {
8623 				ioc_info(ioc, "failure at %s:%d/%s()!\n",
8624 					 __FILE__, __LINE__, __func__);
8625 				return;
8626 			}
8627 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8628 				&enclosure_dev->pg0,
8629 				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8630 				enclosure_handle);
8631 
8632 			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8633 						MPI2_IOCSTATUS_MASK)) {
8634 				kfree(enclosure_dev);
8635 				return;
8636 			}
8637 
8638 			list_add_tail(&enclosure_dev->list,
8639 							&ioc->enclosure_list);
8640 		}
8641 		break;
8642 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8643 		if (enclosure_dev) {
8644 			list_del(&enclosure_dev->list);
8645 			kfree(enclosure_dev);
8646 		}
8647 		break;
8648 	default:
8649 		break;
8650 	}
8651 }
8652 
8653 /**
8654  * _scsih_sas_broadcast_primitive_event - handle broadcast events
8655  * @ioc: per adapter object
8656  * @fw_event: The fw_event_work object
8657  * Context: user.
8658  */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	/* serialize against other task-management users of tm_cmds */
	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	/* walk every outstanding SCSI IO slot (smid) on this adapter */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		 /* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		 /* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		 /* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* drop the lookup lock while the TM request is in flight */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
				ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		task_abort_retries = 0;
 tm_retry:
		/* abort the orphaned IO; retried until it sticks or 60 tries */
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* another broadcast AEN arrived while we were processing: rescan */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		 ioc->broadcast_aen_pending = 0;
		 goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}
8817 
8818 /**
8819  * _scsih_sas_discovery_event - handle discovery events
8820  * @ioc: per adapter object
8821  * @fw_event: The fw_event_work object
8822  * Context: user.
8823  */
8824 static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8825 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8826 	struct fw_event_work *fw_event)
8827 {
8828 	Mpi2EventDataSasDiscovery_t *event_data =
8829 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8830 
8831 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8832 		ioc_info(ioc, "discovery event: (%s)",
8833 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8834 			 "start" : "stop");
8835 		if (event_data->DiscoveryStatus)
8836 			pr_cont("discovery_status(0x%08x)",
8837 				le32_to_cpu(event_data->DiscoveryStatus));
8838 		pr_cont("\n");
8839 	}
8840 
8841 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8842 	    !ioc->sas_hba.num_phys) {
8843 		if (disable_discovery > 0 && ioc->shost_recovery) {
8844 			/* Wait for the reset to complete */
8845 			while (ioc->shost_recovery)
8846 				ssleep(1);
8847 		}
8848 		_scsih_sas_host_add(ioc);
8849 	}
8850 }
8851 
8852 /**
8853  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8854  *						events
8855  * @ioc: per adapter object
8856  * @fw_event: The fw_event_work object
8857  * Context: user.
8858  */
8859 static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8860 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8861 	struct fw_event_work *fw_event)
8862 {
8863 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8864 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8865 
8866 	switch (event_data->ReasonCode) {
8867 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8868 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8869 			 le16_to_cpu(event_data->DevHandle),
8870 			 (u64)le64_to_cpu(event_data->SASAddress),
8871 			 event_data->PhysicalPort);
8872 		break;
8873 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8874 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8875 			 le16_to_cpu(event_data->DevHandle),
8876 			 (u64)le64_to_cpu(event_data->SASAddress),
8877 			 event_data->PhysicalPort);
8878 		break;
8879 	default:
8880 		break;
8881 	}
8882 }
8883 
8884 /**
8885  * _scsih_pcie_enumeration_event - handle enumeration events
8886  * @ioc: per adapter object
8887  * @fw_event: The fw_event_work object
8888  * Context: user.
8889  */
8890 static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8891 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8892 	struct fw_event_work *fw_event)
8893 {
8894 	Mpi26EventDataPCIeEnumeration_t *event_data =
8895 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8896 
8897 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8898 		return;
8899 
8900 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8901 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8902 		 "started" : "completed",
8903 		 event_data->Flags);
8904 	if (event_data->EnumerationStatus)
8905 		pr_cont("enumeration_status(0x%08x)",
8906 			le32_to_cpu(event_data->EnumerationStatus));
8907 	pr_cont("\n");
8908 }
8909 
8910 /**
8911  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8912  * @ioc: per adapter object
8913  * @handle: device handle for physical disk
8914  * @phys_disk_num: physical disk number
8915  *
8916  * Return: 0 for success, else failure.
8917  */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* RAID_ACTION fast path is not applicable to MPI 2.0 controllers */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	/* serialize use of the shared scsih_cmds command slot */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	/* fire the request and wait up to 10 seconds for the reply */
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/* NOTE(review): mpt3sas_check_cmd_timeout presumably a macro
		 * that can set issue_reset — confirm, since issue_reset is
		 * tested after the out label
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	/* release the command slot before any hard reset */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}
9000 
9001 /**
9002  * _scsih_reprobe_lun - reprobing lun
9003  * @sdev: scsi device struct
9004  * @no_uld_attach: sdev->no_uld_attach flag setting
9005  *
9006  **/
9007 static void
_scsih_reprobe_lun(struct scsi_device * sdev,void * no_uld_attach)9008 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
9009 {
9010 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
9011 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
9012 	    sdev->no_uld_attach ? "hiding" : "exposing");
9013 	WARN_ON(scsi_device_reprobe(sdev));
9014 }
9015 
/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Looks up the volume's WWID, allocates and links a new _raid_device
 * object and, unless initial discovery is still in progress, registers
 * the volume with the SCSI midlayer.
 */
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	u64 wwid;
	u16 handle = le16_to_cpu(element->VolDevHandle);
	int rc;

	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
	if (!wwid) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* nothing to do if this volume is already on the adapter's list */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	if (raid_device)
		return;

	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
	if (!raid_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* volumes get a fresh target id on the dedicated RAID channel */
	raid_device->id = ioc->sas_id++;
	raid_device->channel = RAID_CHANNEL;
	raid_device->handle = handle;
	raid_device->wwid = wwid;
	_scsih_raid_device_add(ioc, raid_device);
	if (!ioc->wait_for_discovery_to_complete) {
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			/* midlayer rejected it: unlink and free again */
			_scsih_raid_device_remove(ioc, raid_device);
	} else {
		/*
		 * Discovery still running: defer the scsi_add_device() to
		 * the discovery path, but evaluate the volume as a
		 * potential boot device now.
		 */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		_scsih_determine_boot_device(ioc, raid_device, 1);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}
9069 
/**
 * _scsih_sas_volume_delete - delete volume
 * @ioc: per adapter object
 * @handle: volume device handle
 * Context: user.
 *
 * Unlinks and frees the matching _raid_device under raid_device_lock,
 * first marking its scsi_target as deleted; the midlayer target removal
 * itself happens after the lock has been dropped.
 */
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget = NULL;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device) {
		if (raid_device->starget) {
			/* remember the target so we can remove it below */
			starget = raid_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->deleted = 1;
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		list_del(&raid_device->list);
		kfree(raid_device);
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	/* midlayer removal is done outside the spinlock */
	if (starget)
		scsi_remove_target(&starget->dev);
}
9101 
/**
 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Clears the RAID-component state of the physical disk named in
 * @element (pd_handles bit, volume association, target flag) and
 * reprobes its LUNs so upper-level drivers attach again.
 */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* device no longer belongs to a volume */
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);

	/* drop the reference taken by __mpt3sas_get_sdev_by_handle() */
	sas_device_put(sas_device);
}
9141 
/**
 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Marks the physical disk named in @element as a RAID component
 * (pd_handles bit, target flag, owning-volume handle/wwid), enables
 * the firmware fast path for it and reprobes its LUNs with
 * no_uld_attach set so upper-level drivers detach.
 */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	/* resolve the owning volume before taking the device lock */
	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
		    &volume_wwid);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);

	/* drop the reference taken by __mpt3sas_get_sdev_by_handle() */
	sas_device_put(sas_device);
}
9190 
9191 /**
9192  * _scsih_sas_pd_delete - delete pd component
9193  * @ioc: per adapter object
9194  * @element: IR config element data
9195  * Context: user.
9196  */
9197 static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9198 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9199 	Mpi2EventIrConfigElement_t *element)
9200 {
9201 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9202 
9203 	_scsih_device_remove_by_handle(ioc, handle);
9204 }
9205 
/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 *
 * Marks the physical disk as a RAID component and enables the firmware
 * fast path for it.  If the device is not yet known, its SAS Device
 * Page 0 is read, the transport links are updated and the device is
 * added (hidden from upper-level drivers).
 */
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u64 sas_address;
	u16 parent_handle;

	set_bit(handle, ioc->pd_handles);

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* already discovered: only turn on the fast path */
		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
		sas_device_put(sas_device);
		return;
	}

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* refresh transport links through the parent before adding */
	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
		mpt3sas_transport_update_links(ioc, sas_address, handle,
		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
		    mpt3sas_get_port_by_id(ioc,
		    sas_device_pg0.PhysicalPort, 0));

	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
	/* is_pd = 1: the device is added hidden */
	_scsih_add_device(ioc, handle, 0, 1);
}
9258 
9259 /**
9260  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9261  * @ioc: per adapter object
9262  * @event_data: event data payload
9263  * Context: user.
9264  */
9265 static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrConfigChangeList_t * event_data)9266 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9267 	Mpi2EventDataIrConfigChangeList_t *event_data)
9268 {
9269 	Mpi2EventIrConfigElement_t *element;
9270 	u8 element_type;
9271 	int i;
9272 	char *reason_str = NULL, *element_str = NULL;
9273 
9274 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9275 
9276 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9277 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9278 		 "foreign" : "native",
9279 		 event_data->NumElements);
9280 	for (i = 0; i < event_data->NumElements; i++, element++) {
9281 		switch (element->ReasonCode) {
9282 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9283 			reason_str = "add";
9284 			break;
9285 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9286 			reason_str = "remove";
9287 			break;
9288 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9289 			reason_str = "no change";
9290 			break;
9291 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9292 			reason_str = "hide";
9293 			break;
9294 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9295 			reason_str = "unhide";
9296 			break;
9297 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9298 			reason_str = "volume_created";
9299 			break;
9300 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9301 			reason_str = "volume_deleted";
9302 			break;
9303 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9304 			reason_str = "pd_created";
9305 			break;
9306 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9307 			reason_str = "pd_deleted";
9308 			break;
9309 		default:
9310 			reason_str = "unknown reason";
9311 			break;
9312 		}
9313 		element_type = le16_to_cpu(element->ElementFlags) &
9314 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9315 		switch (element_type) {
9316 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9317 			element_str = "volume";
9318 			break;
9319 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9320 			element_str = "phys disk";
9321 			break;
9322 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9323 			element_str = "hot spare";
9324 			break;
9325 		default:
9326 			element_str = "unknown element";
9327 			break;
9328 		}
9329 		pr_info("\t(%s:%s), vol handle(0x%04x), " \
9330 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9331 		    reason_str, le16_to_cpu(element->VolDevHandle),
9332 		    le16_to_cpu(element->PhysDiskDevHandle),
9333 		    element->PhysDiskNum);
9334 	}
9335 }
9336 
/**
 * _scsih_sas_ir_config_change_event - handle ir configuration change events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Walks the config element list in the event payload and dispatches
 * each element to the matching volume add/delete or physical disk
 * hide/expose/add/delete handler.
 */
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u8 foreign_config;
	Mpi2EventDataIrConfigChangeList_t *event_data =
		(Mpi2EventDataIrConfigChangeList_t *)
		fw_event->event_data;

	/* optional debug dump of the whole element list */
	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
	     (!ioc->hide_ir_msg))
		_scsih_sas_ir_config_change_event_debug(ioc, event_data);

	/* volumes of a foreign configuration are not added/deleted below */
	foreign_config = (le32_to_cpu(event_data->Flags) &
	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	/*
	 * While host reset is in progress (on non-MPI2 HBAs), only turn
	 * the fast path on for elements being hidden; all other changes
	 * are left for the post-reset rescan to pick up.
	 */
	if (ioc->shost_recovery &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		for (i = 0; i < event_data->NumElements; i++, element++) {
			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
				_scsih_ir_fastpath(ioc,
					le16_to_cpu(element->PhysDiskDevHandle),
					element->PhysDiskNum);
		}
		return;
	}

	for (i = 0; i < event_data->NumElements; i++, element++) {

		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			if (!foreign_config)
				_scsih_sas_volume_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			if (!foreign_config)
				_scsih_sas_volume_delete(ioc,
				    le16_to_cpu(element->VolDevHandle));
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			/* pd handling is skipped on warpdrive controllers */
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_hide(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_expose(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_delete(ioc, element);
			break;
		}
	}
}
9406 
/**
 * _scsih_sas_ir_volume_event - IR volume event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Handles IR volume state changes: volumes going missing/failed are
 * deleted; volumes coming online/degraded/optimal are created and
 * registered with the SCSI midlayer if not already known.
 */
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u64 wwid;
	unsigned long flags;
	struct _raid_device *raid_device;
	u16 handle;
	u32 state;
	int rc;
	Mpi2EventDataIrVolume_t *event_data =
		(Mpi2EventDataIrVolume_t *) fw_event->event_data;

	/* topology will be rebuilt after the reset; ignore the event */
	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->VolDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));
	switch (state) {
	case MPI2_RAID_VOL_STATE_MISSING:
	case MPI2_RAID_VOL_STATE_FAILED:
		_scsih_sas_volume_delete(ioc, handle);
		break;

	case MPI2_RAID_VOL_STATE_ONLINE:
	case MPI2_RAID_VOL_STATE_DEGRADED:
	case MPI2_RAID_VOL_STATE_OPTIMAL:

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

		/* already known: nothing more to do */
		if (raid_device)
			break;

		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
		if (!wwid) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
		if (!raid_device) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		/* volumes get a fresh target id on the RAID channel */
		raid_device->id = ioc->sas_id++;
		raid_device->channel = RAID_CHANNEL;
		raid_device->handle = handle;
		raid_device->wwid = wwid;
		_scsih_raid_device_add(ioc, raid_device);
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			/* midlayer rejected it: unlink and free again */
			_scsih_raid_device_remove(ioc, raid_device);
		break;

	case MPI2_RAID_VOL_STATE_INITIALIZING:
	default:
		break;
	}
}
9487 
/**
 * _scsih_sas_ir_physical_disk_event - PD event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Handles IR physical disk state changes: when a pd becomes usable
 * (online/degraded/rebuilding/optimal/hot spare) it is marked in
 * pd_handles and, if not yet known, discovered from SAS Device Page 0
 * and added as a hidden device.
 */
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct _sas_device *sas_device;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	Mpi2EventDataIrPhysicalDisk_t *event_data =
		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
	u64 sas_address;

	/* topology will be rebuilt after the reset; ignore the event */
	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);

	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));

	switch (state) {
	case MPI2_RAID_PD_STATE_ONLINE:
	case MPI2_RAID_PD_STATE_DEGRADED:
	case MPI2_RAID_PD_STATE_REBUILDING:
	case MPI2_RAID_PD_STATE_OPTIMAL:
	case MPI2_RAID_PD_STATE_HOT_SPARE:

		/* warpdrive controllers do not track pds in pd_handles */
		if (!ioc->is_warpdrive)
			set_bit(handle, ioc->pd_handles);

		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			/* already discovered: drop the lookup reference */
			sas_device_put(sas_device);
			return;
		}

		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		/* refresh transport links through the parent before adding */
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc,
			    sas_device_pg0.PhysicalPort, 0));

		/* is_pd = 1: the device is added hidden */
		_scsih_add_device(ioc, handle, 0, 1);

		break;

	case MPI2_RAID_PD_STATE_OFFLINE:
	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}
9574 
9575 /**
9576  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9577  * @ioc: per adapter object
9578  * @event_data: event data payload
9579  * Context: user.
9580  */
9581 static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrOperationStatus_t * event_data)9582 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9583 	Mpi2EventDataIrOperationStatus_t *event_data)
9584 {
9585 	char *reason_str = NULL;
9586 
9587 	switch (event_data->RAIDOperation) {
9588 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
9589 		reason_str = "resync";
9590 		break;
9591 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9592 		reason_str = "online capacity expansion";
9593 		break;
9594 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9595 		reason_str = "consistency check";
9596 		break;
9597 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9598 		reason_str = "background init";
9599 		break;
9600 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9601 		reason_str = "make data consistent";
9602 		break;
9603 	}
9604 
9605 	if (!reason_str)
9606 		return;
9607 
9608 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9609 		 reason_str,
9610 		 le16_to_cpu(event_data->VolDevHandle),
9611 		 event_data->PercentComplete);
9612 }
9613 
9614 /**
9615  * _scsih_sas_ir_operation_status_event - handle RAID operation events
9616  * @ioc: per adapter object
9617  * @fw_event: The fw_event_work object
9618  * Context: user.
9619  */
9620 static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9621 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9622 	struct fw_event_work *fw_event)
9623 {
9624 	Mpi2EventDataIrOperationStatus_t *event_data =
9625 		(Mpi2EventDataIrOperationStatus_t *)
9626 		fw_event->event_data;
9627 	static struct _raid_device *raid_device;
9628 	unsigned long flags;
9629 	u16 handle;
9630 
9631 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9632 	    (!ioc->hide_ir_msg))
9633 		_scsih_sas_ir_operation_status_event_debug(ioc,
9634 		     event_data);
9635 
9636 	/* code added for raid transport support */
9637 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9638 
9639 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9640 		handle = le16_to_cpu(event_data->VolDevHandle);
9641 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9642 		if (raid_device)
9643 			raid_device->percent_complete =
9644 			    event_data->PercentComplete;
9645 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9646 	}
9647 }
9648 
9649 /**
9650  * _scsih_prep_device_scan - initialize parameters prior to device scan
9651  * @ioc: per adapter object
9652  *
9653  * Set the deleted flag prior to device scan.  If the device is found during
9654  * the scan, then we clear the deleted flag.
9655  */
9656 static void
_scsih_prep_device_scan(struct MPT3SAS_ADAPTER * ioc)9657 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9658 {
9659 	struct MPT3SAS_DEVICE *sas_device_priv_data;
9660 	struct scsi_device *sdev;
9661 
9662 	shost_for_each_device(sdev, ioc->shost) {
9663 		sas_device_priv_data = sdev->hostdata;
9664 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
9665 			sas_device_priv_data->sas_target->deleted = 1;
9666 	}
9667 }
9668 
/**
 * _scsih_update_device_qdepth - Update QD during Reset.
 * @ioc: per adapter object
 *
 * Walks every scsi_device on the host and re-applies the queue depth
 * appropriate for its transport: NVMe, SSP (wide vs narrow port) or
 * SATA.  Devices matching none of these are left untouched.
 */
static void
_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct scsi_device *sdev;
	u16 qdepth;

	ioc_info(ioc, "Update devices with firmware reported queue depth\n");
	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (sas_device_priv_data && sas_device_priv_data->sas_target) {
			sas_target_priv_data = sas_device_priv_data->sas_target;
			sas_device = sas_device_priv_data->sas_target->sas_dev;
			if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
				/* NVMe device */
				qdepth = ioc->max_nvme_qd;
			else if (sas_device &&
			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
				/* SAS SSP: depth depends on port width */
				qdepth = (sas_device->port_type > 1) ?
				    ioc->max_wideport_qd : ioc->max_narrowport_qd;
			else if (sas_device &&
			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				qdepth = ioc->max_sata_qd;
			else
				continue;
			mpt3sas_scsih_change_queue_depth(sdev, qdepth);
		}
	}
}
9704 
/**
 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
 * @ioc: per adapter object
 * @sas_device_pg0: SAS Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_sas_devices.
 *
 * Matches the page-0 data against the adapter's sas_device_list by SAS
 * address, slot and port; on a match the device is flagged responding,
 * its enclosure information is refreshed, and a changed firmware device
 * handle is propagated to the cached copies.
 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, sas_device_pg0->PhysicalPort, 0);

	/* look up the enclosure entry matching this device, if any */
	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
				le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* device identity is (sas address, slot, port) */
		if (sas_device->sas_address != le64_to_cpu(
		    sas_device_pg0->SASAddress))
			continue;
		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
			continue;
		if (sas_device->port != port)
			continue;
		sas_device->responding = 1;
		starget = sas_device->starget;
		if (starget && starget->hostdata) {
			/* device survived the reset: clear stale state */
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->tm_busy = 0;
			sas_target_priv_data->deleted = 0;
		} else
			sas_target_priv_data = NULL;
		if (starget) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_addr(0x%016llx)\n",
			    le16_to_cpu(sas_device_pg0->DevHandle),
			    (unsigned long long)
			    sas_device->sas_address);

			if (sas_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				 "enclosure logical id(0x%016llx), slot(%d)\n",
				 (unsigned long long)
				 sas_device->enclosure_logical_id,
				 sas_device->slot);
		}
		/* refresh enclosure level / connector name from page 0 */
		if (le16_to_cpu(sas_device_pg0->Flags) &
		      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			   sas_device_pg0->EnclosureLevel;
			memcpy(&sas_device->connector_name[0],
				&sas_device_pg0->ConnectorName[0], 4);
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
			le16_to_cpu(sas_device_pg0->EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		if (enclosure_dev) {
			sas_device->enclosure_logical_id = le64_to_cpu(
				enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}

		/* firmware may assign a new handle across a reset */
		if (sas_device->handle == le16_to_cpu(
		    sas_device_pg0->DevHandle))
			goto out;
		pr_info("\thandle changed from(0x%04x)!!!\n",
		    sas_device->handle);
		sas_device->handle = le16_to_cpu(
		    sas_device_pg0->DevHandle);
		if (sas_target_priv_data)
			sas_target_priv_data->handle =
			    le16_to_cpu(sas_device_pg0->DevHandle);
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
9804 
/**
 * _scsih_create_enclosure_list_after_reset - Free Existing list,
 *	And create enclosure list by scanning all Enclosure Page(0)s
 * @ioc: per adapter object
 *
 * Iterates the firmware's enclosure pages via the GET_NEXT_HANDLE form,
 * allocating one _enclosure_node per page, until the config request
 * fails or the IOC reports a non-success status.
 */
static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	struct _enclosure_node *enclosure_dev;
	Mpi2ConfigReply_t mpi_reply;
	u16 enclosure_handle;
	int rc;

	/* Free existing enclosure list */
	mpt3sas_free_enclosure_list(ioc);

	/* Re constructing enclosure list after reset*/
	enclosure_handle = 0xFFFF;	/* GET_NEXT starts from this handle */
	do {
		enclosure_dev =
			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
		if (!enclosure_dev) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}
		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
				&enclosure_dev->pg0,
				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
				enclosure_handle);

		/* request failure or non-zero IOC status ends the walk */
		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
						MPI2_IOCSTATUS_MASK)) {
			kfree(enclosure_dev);
			return;
		}
		list_add_tail(&enclosure_dev->list,
						&ioc->enclosure_list);
		/* continue from the handle just returned */
		enclosure_handle =
			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
	} while (1);
}
9847 
/**
 * _scsih_search_responding_sas_devices - search for responding SAS end devices
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 *
 * Walks the firmware's SAS Device Page 0 entries via the GET_NEXT_HANDLE
 * form and marks every matching cached end device as responding.
 */
static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u32 device_info;

	ioc_info(ioc, "search for end-devices: start\n");

	/* no cached devices: nothing can be marked responding */
	if (list_empty(&ioc->sas_device_list))
		goto out;

	handle = 0xFFFF;	/* GET_NEXT starts from this handle */
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
		/* only end devices are tracked here */
		if (!(_scsih_is_end_device(device_info)))
			continue;
		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
	}

 out:
	ioc_info(ioc, "search for end-devices: complete\n");
}
9887 
/**
 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 * @ioc: per adapter object
 * @pcie_device_pg0: PCIe Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
 *
 * Matches the page-0 data against the adapter's pcie_device_list by
 * WWID and slot; on a match the device is flagged responding, its
 * enclosure information is refreshed, and a changed firmware device
 * handle is propagated to the cached copies.
 */
static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		/* device identity is (wwid, slot) */
		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
		    && (pcie_device->slot == le16_to_cpu(
		    pcie_device_pg0->Slot))) {
			pcie_device->access_status =
					pcie_device_pg0->AccessStatus;
			pcie_device->responding = 1;
			starget = pcie_device->starget;
			if (starget && starget->hostdata) {
				/* device survived the reset: clear stale state */
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->tm_busy = 0;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			if (starget) {
				starget_printk(KERN_INFO, starget,
				    "handle(0x%04x), wwid(0x%016llx) ",
				    pcie_device->handle,
				    (unsigned long long)pcie_device->wwid);
				if (pcie_device->enclosure_handle != 0)
					starget_printk(KERN_INFO, starget,
					    "enclosure logical id(0x%016llx), "
					    "slot(%d)\n",
					    (unsigned long long)
					    pcie_device->enclosure_logical_id,
					    pcie_device->slot);
			}

			/* refresh enclosure level / connector name (MPI2.5+) */
			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
				pcie_device->enclosure_level =
				    pcie_device_pg0->EnclosureLevel;
				memcpy(&pcie_device->connector_name[0],
				    &pcie_device_pg0->ConnectorName[0], 4);
			} else {
				pcie_device->enclosure_level = 0;
				pcie_device->connector_name[0] = '\0';
			}

			/* firmware may assign a new handle across a reset */
			if (pcie_device->handle == le16_to_cpu(
			    pcie_device_pg0->DevHandle))
				goto out;
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    pcie_device->handle);
			pcie_device->handle = le16_to_cpu(
			    pcie_device_pg0->DevHandle);
			if (sas_target_priv_data)
				sas_target_priv_data->handle =
				    le16_to_cpu(pcie_device_pg0->DevHandle);
			goto out;
		}
	}

 out:
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
9963 
9964 /**
9965  * _scsih_search_responding_pcie_devices -
9966  * @ioc: per adapter object
9967  *
9968  * After host reset, find out whether devices are still responding.
9969  * If not remove.
9970  */
9971 static void
_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER * ioc)9972 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9973 {
9974 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
9975 	Mpi2ConfigReply_t mpi_reply;
9976 	u16 ioc_status;
9977 	u16 handle;
9978 	u32 device_info;
9979 
9980 	ioc_info(ioc, "search for end-devices: start\n");
9981 
9982 	if (list_empty(&ioc->pcie_device_list))
9983 		goto out;
9984 
9985 	handle = 0xFFFF;
9986 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9987 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9988 		handle))) {
9989 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9990 		    MPI2_IOCSTATUS_MASK;
9991 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9992 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9993 				 __func__, ioc_status,
9994 				 le32_to_cpu(mpi_reply.IOCLogInfo));
9995 			break;
9996 		}
9997 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9998 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9999 		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
10000 			continue;
10001 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
10002 	}
10003 out:
10004 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
10005 }
10006 
/**
 * _scsih_mark_responding_raid_device - mark a raid_device as responding
 * @ioc: per adapter object
 * @wwid: world wide identifier for raid volume
 * @handle: device handle
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_raid_devices.
 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		/* Match on WWID, not handle: the handle may have changed
		 * across the host reset.
		 */
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/* Drop the lock: the printk and the warpdrive
			 * re-initialization below are not safe to run
			 * with the raid_device spinlock held.
			 */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			/* Re-acquire the lock before touching the handle. */
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
10063 
10064 /**
10065  * _scsih_search_responding_raid_devices -
10066  * @ioc: per adapter object
10067  *
10068  * After host reset, find out whether devices are still responding.
10069  * If not remove.
10070  */
10071 static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER * ioc)10072 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
10073 {
10074 	Mpi2RaidVolPage1_t volume_pg1;
10075 	Mpi2RaidVolPage0_t volume_pg0;
10076 	Mpi2RaidPhysDiskPage0_t pd_pg0;
10077 	Mpi2ConfigReply_t mpi_reply;
10078 	u16 ioc_status;
10079 	u16 handle;
10080 	u8 phys_disk_num;
10081 
10082 	if (!ioc->ir_firmware)
10083 		return;
10084 
10085 	ioc_info(ioc, "search for raid volumes: start\n");
10086 
10087 	if (list_empty(&ioc->raid_device_list))
10088 		goto out;
10089 
10090 	handle = 0xFFFF;
10091 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10092 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10093 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10094 		    MPI2_IOCSTATUS_MASK;
10095 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10096 			break;
10097 		handle = le16_to_cpu(volume_pg1.DevHandle);
10098 
10099 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10100 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10101 		     sizeof(Mpi2RaidVolPage0_t)))
10102 			continue;
10103 
10104 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10105 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10106 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
10107 			_scsih_mark_responding_raid_device(ioc,
10108 			    le64_to_cpu(volume_pg1.WWID), handle);
10109 	}
10110 
10111 	/* refresh the pd_handles */
10112 	if (!ioc->is_warpdrive) {
10113 		phys_disk_num = 0xFF;
10114 		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
10115 		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10116 		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10117 		    phys_disk_num))) {
10118 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10119 			    MPI2_IOCSTATUS_MASK;
10120 			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10121 				break;
10122 			phys_disk_num = pd_pg0.PhysDiskNum;
10123 			handle = le16_to_cpu(pd_pg0.DevHandle);
10124 			set_bit(handle, ioc->pd_handles);
10125 		}
10126 	}
10127  out:
10128 	ioc_info(ioc, "search for responding raid volumes: complete\n");
10129 }
10130 
10131 /**
10132  * _scsih_mark_responding_expander - mark a expander as responding
10133  * @ioc: per adapter object
10134  * @expander_pg0:SAS Expander Config Page0
10135  *
10136  * After host reset, find out whether devices are still responding.
10137  * Used in _scsih_remove_unresponsive_expanders.
10138  */
10139 static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER * ioc,Mpi2ExpanderPage0_t * expander_pg0)10140 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10141 	Mpi2ExpanderPage0_t *expander_pg0)
10142 {
10143 	struct _sas_node *sas_expander = NULL;
10144 	unsigned long flags;
10145 	int i;
10146 	struct _enclosure_node *enclosure_dev = NULL;
10147 	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
10148 	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10149 	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10150 	struct hba_port *port = mpt3sas_get_port_by_id(
10151 	    ioc, expander_pg0->PhysicalPort, 0);
10152 
10153 	if (enclosure_handle)
10154 		enclosure_dev =
10155 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
10156 							enclosure_handle);
10157 
10158 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
10159 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
10160 		if (sas_expander->sas_address != sas_address)
10161 			continue;
10162 		if (sas_expander->port != port)
10163 			continue;
10164 		sas_expander->responding = 1;
10165 
10166 		if (enclosure_dev) {
10167 			sas_expander->enclosure_logical_id =
10168 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10169 			sas_expander->enclosure_handle =
10170 			    le16_to_cpu(expander_pg0->EnclosureHandle);
10171 		}
10172 
10173 		if (sas_expander->handle == handle)
10174 			goto out;
10175 		pr_info("\texpander(0x%016llx): handle changed" \
10176 		    " from(0x%04x) to (0x%04x)!!!\n",
10177 		    (unsigned long long)sas_expander->sas_address,
10178 		    sas_expander->handle, handle);
10179 		sas_expander->handle = handle;
10180 		for (i = 0 ; i < sas_expander->num_phys ; i++)
10181 			sas_expander->phy[i].handle = handle;
10182 		goto out;
10183 	}
10184  out:
10185 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10186 }
10187 
10188 /**
10189  * _scsih_search_responding_expanders -
10190  * @ioc: per adapter object
10191  *
10192  * After host reset, find out whether devices are still responding.
10193  * If not remove.
10194  */
10195 static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER * ioc)10196 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10197 {
10198 	Mpi2ExpanderPage0_t expander_pg0;
10199 	Mpi2ConfigReply_t mpi_reply;
10200 	u16 ioc_status;
10201 	u64 sas_address;
10202 	u16 handle;
10203 	u8 port;
10204 
10205 	ioc_info(ioc, "search for expanders: start\n");
10206 
10207 	if (list_empty(&ioc->sas_expander_list))
10208 		goto out;
10209 
10210 	handle = 0xFFFF;
10211 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10212 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10213 
10214 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10215 		    MPI2_IOCSTATUS_MASK;
10216 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10217 			break;
10218 
10219 		handle = le16_to_cpu(expander_pg0.DevHandle);
10220 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
10221 		port = expander_pg0.PhysicalPort;
10222 		pr_info(
10223 		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10224 		    handle, (unsigned long long)sas_address,
10225 		    (ioc->multipath_on_hba ?
10226 		    port : MULTIPATH_DISABLED_PORT_ID));
10227 		_scsih_mark_responding_expander(ioc, &expander_pg0);
10228 	}
10229 
10230  out:
10231 	ioc_info(ioc, "search for expanders: complete\n");
10232 }
10233 
/**
 * _scsih_remove_unresponding_devices - removing unresponding devices
 * @ioc: per adapter object
 *
 * Runs after the _scsih_search_responding_* passes have flagged the devices
 * that answered the post-reset scan; anything still unflagged is removed.
 * For devices that did respond, the responding flag is cleared again so the
 * next reset cycle starts from a clean state.
 */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Clean up the sas_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_init_list, list) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* Move unresponding devices to a private list under the lock;
	 * removal itself happens after the lock is dropped.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	/* Reuse 'head' for the PCIe pass; same prune-then-remove pattern. */
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/*
	 * Clean up the pcie_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_init_list, list) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}
10350 
10351 static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander,u16 handle)10352 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10353 	struct _sas_node *sas_expander, u16 handle)
10354 {
10355 	Mpi2ExpanderPage1_t expander_pg1;
10356 	Mpi2ConfigReply_t mpi_reply;
10357 	int i;
10358 
10359 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
10360 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10361 		    &expander_pg1, i, handle))) {
10362 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
10363 				__FILE__, __LINE__, __func__);
10364 			return;
10365 		}
10366 
10367 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10368 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10369 		    expander_pg1.NegotiatedLinkRate >> 4,
10370 		    sas_expander->port);
10371 	}
10372 }
10373 
/**
 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
 * @ioc: per adapter object
 *
 * Rediscovers the topology in firmware order: expanders first, then (on IR
 * firmware) phys disks and volumes, then SAS end-devices, then PCIe/NVMe
 * end-devices.  Devices already known to the driver are only refreshed;
 * new ones are added.
 */
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2EventIrConfigElement_t element;
	Mpi2ConfigReply_t mpi_reply;
	u8 phys_disk_num, port_id;
	u16 ioc_status;
	u16 handle, parent_handle;
	u64 sas_address;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _sas_node *expander_device;
	/* NOTE(review): 'static' makes this one shared slot across all calls
	 * (not reentrant); it is always reassigned before use here, so the
	 * qualifier looks unnecessary — confirm before removing.
	 */
	static struct _raid_device *raid_device;
	u8 retry_count;
	unsigned long flags;

	ioc_info(ioc, "scan devices: start\n");

	_scsih_sas_host_refresh(ioc);

	ioc_info(ioc, "\tscan devices: expanders start\n");

	/* expanders */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(expander_pg0.DevHandle);
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		port_id = expander_pg0.PhysicalPort;
		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
		    ioc, le64_to_cpu(expander_pg0.SASAddress),
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* Known expander: just refresh its phy links; otherwise add. */
		if (expander_device)
			_scsih_refresh_expander_links(ioc, expander_device,
			    handle);
		else {
			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(expander_pg0.SASAddress));
			_scsih_expander_add(ioc, handle);
			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(expander_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: expanders complete\n");

	/* Phys disk and volume scanning only applies to IR firmware. */
	if (!ioc->ir_firmware)
		goto skip_to_sas;

	ioc_info(ioc, "\tscan devices: phys disk start\n");

	/* phys disk */
	phys_disk_num = 0xFF;
	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
	    phys_disk_num))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		phys_disk_num = pd_pg0.PhysDiskNum;
		handle = le16_to_cpu(pd_pg0.DevHandle);
		/* Skip phys disks the driver already knows about. */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			continue;
		}
		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle) != 0)
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle,
		    &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			port_id = sas_device_pg0.PhysicalPort;
			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, sas_device_pg0.PhyNum,
			    MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc, port_id, 0));
			set_bit(handle, ioc->pd_handles);
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    1)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: phys disk complete\n");

	ioc_info(ioc, "\tscan devices: volumes start\n");

	/* volumes */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(volume_pg1.DevHandle);
		/* Skip volumes the driver already tracks (matched by WWID). */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_wwid(ioc,
		    le64_to_cpu(volume_pg1.WWID));
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (raid_device)
			continue;
		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		     sizeof(Mpi2RaidVolPage0_t)))
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		/* Only add volumes in a usable state, via a synthesized
		 * IR config-change "added" element.
		 */
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
			element.VolDevHandle = volume_pg1.DevHandle;
			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
				 volume_pg1.DevHandle);
			_scsih_sas_volume_add(ioc, &element);
			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
				 volume_pg1.DevHandle);
		}
	}

	ioc_info(ioc, "\tscan devices: volumes complete\n");

 skip_to_sas:

	ioc_info(ioc, "\tscan devices: end devices start\n");

	/* sas devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		if (!(_scsih_is_end_device(
		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
			continue;
		/* Skip end-devices already known (matched by SAS address). */
		port_id = sas_device_pg0.PhysicalPort;
		sas_device = mpt3sas_get_sdev_by_addr(ioc,
		    le64_to_cpu(sas_device_pg0.SASAddress),
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		if (sas_device) {
			sas_device_put(sas_device);
			continue;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc, port_id, 0));
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    0)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}
	ioc_info(ioc, "\tscan devices: end devices complete\n");
	ioc_info(ioc, "\tscan devices: pcie end devices start\n");

	/* pcie devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
		handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
				& MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
		if (!(_scsih_is_nvme_pciescsi_device(
			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
			continue;
		/* Skip PCIe devices already known (matched by WWID). */
		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
				le64_to_cpu(pcie_device_pg0.WWID));
		if (pcie_device) {
			pcie_device_put(pcie_device);
			continue;
		}
		retry_count = 0;
		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
		_scsih_pcie_add_device(ioc, handle);

		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
	}

	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
	ioc_info(ioc, "scan devices: complete\n");
}
10637 
/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	/* Nothing to tear down before the reset; just trace the callback. */
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
10648 
/**
 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
 *							scsi & tm cmds.
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
	/* Abort a pending driver-internal scsih command: flag it as
	 * terminated by reset, release its smid, then wake the waiter.
	 */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	/* Likewise for a pending task-management command. */
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	/* Drop pre-reset bookkeeping and flush queued firmware events
	 * and running commands; devices get rediscovered after reset.
	 */
	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	       ioc->device_remove_in_progress_sz);
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}
10678 
10679 /**
10680  * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10681  * @ioc: per adapter object
10682  *
10683  * The handler for doing any required cleanup or initialization.
10684  */
10685 void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER * ioc)10686 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10687 {
10688 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
10689 	if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
10690 		if (ioc->multipath_on_hba) {
10691 			_scsih_sas_port_refresh(ioc);
10692 			_scsih_update_vphys_after_reset(ioc);
10693 		}
10694 		_scsih_prep_device_scan(ioc);
10695 		_scsih_create_enclosure_list_after_reset(ioc);
10696 		_scsih_search_responding_sas_devices(ioc);
10697 		_scsih_search_responding_pcie_devices(ioc);
10698 		_scsih_search_responding_raid_devices(ioc);
10699 		_scsih_search_responding_expanders(ioc);
10700 		_scsih_error_recovery_delete_devices(ioc);
10701 	}
10702 }
10703 
10704 /**
10705  * _mpt3sas_fw_work - delayed task for processing firmware events
10706  * @ioc: per adapter object
10707  * @fw_event: The fw_event_work object
10708  * Context: user.
10709  */
10710 static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)10711 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10712 {
10713 	ioc->current_event = fw_event;
10714 	_scsih_fw_event_del_from_list(ioc, fw_event);
10715 
10716 	/* the queue is being flushed so ignore this event */
10717 	if (ioc->remove_host || ioc->pci_error_recovery) {
10718 		fw_event_work_put(fw_event);
10719 		ioc->current_event = NULL;
10720 		return;
10721 	}
10722 
10723 	switch (fw_event->event) {
10724 	case MPT3SAS_PROCESS_TRIGGER_DIAG:
10725 		mpt3sas_process_trigger_data(ioc,
10726 			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10727 			fw_event->event_data);
10728 		break;
10729 	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
10730 		while (scsi_host_in_recovery(ioc->shost) ||
10731 					 ioc->shost_recovery) {
10732 			/*
10733 			 * If we're unloading or cancelling the work, bail.
10734 			 * Otherwise, this can become an infinite loop.
10735 			 */
10736 			if (ioc->remove_host || ioc->fw_events_cleanup)
10737 				goto out;
10738 			ssleep(1);
10739 		}
10740 		_scsih_remove_unresponding_devices(ioc);
10741 		_scsih_del_dirty_vphy(ioc);
10742 		_scsih_del_dirty_port_entries(ioc);
10743 		if (ioc->is_gen35_ioc)
10744 			_scsih_update_device_qdepth(ioc);
10745 		_scsih_scan_for_devices_after_reset(ioc);
10746 		/*
10747 		 * If diag reset has occurred during the driver load
10748 		 * then driver has to complete the driver load operation
10749 		 * by executing the following items:
10750 		 *- Register the devices from sas_device_init_list to SML
10751 		 *- clear is_driver_loading flag,
10752 		 *- start the watchdog thread.
10753 		 * In happy driver load path, above things are taken care of when
10754 		 * driver executes scsih_scan_finished().
10755 		 */
10756 		if (ioc->is_driver_loading)
10757 			_scsih_complete_devices_scanning(ioc);
10758 		_scsih_set_nvme_max_shutdown_latency(ioc);
10759 		break;
10760 	case MPT3SAS_PORT_ENABLE_COMPLETE:
10761 		ioc->start_scan = 0;
10762 		if (missing_delay[0] != -1 && missing_delay[1] != -1)
10763 			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10764 			    missing_delay[1]);
10765 		dewtprintk(ioc,
10766 			   ioc_info(ioc, "port enable: complete from worker thread\n"));
10767 		break;
10768 	case MPT3SAS_TURN_ON_PFA_LED:
10769 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10770 		break;
10771 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10772 		_scsih_sas_topology_change_event(ioc, fw_event);
10773 		break;
10774 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10775 		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10776 			_scsih_sas_device_status_change_event_debug(ioc,
10777 			    (Mpi2EventDataSasDeviceStatusChange_t *)
10778 			    fw_event->event_data);
10779 		break;
10780 	case MPI2_EVENT_SAS_DISCOVERY:
10781 		_scsih_sas_discovery_event(ioc, fw_event);
10782 		break;
10783 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10784 		_scsih_sas_device_discovery_error_event(ioc, fw_event);
10785 		break;
10786 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10787 		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
10788 		break;
10789 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10790 		_scsih_sas_enclosure_dev_status_change_event(ioc,
10791 		    fw_event);
10792 		break;
10793 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10794 		_scsih_sas_ir_config_change_event(ioc, fw_event);
10795 		break;
10796 	case MPI2_EVENT_IR_VOLUME:
10797 		_scsih_sas_ir_volume_event(ioc, fw_event);
10798 		break;
10799 	case MPI2_EVENT_IR_PHYSICAL_DISK:
10800 		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
10801 		break;
10802 	case MPI2_EVENT_IR_OPERATION_STATUS:
10803 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
10804 		break;
10805 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10806 		_scsih_pcie_device_status_change_event(ioc, fw_event);
10807 		break;
10808 	case MPI2_EVENT_PCIE_ENUMERATION:
10809 		_scsih_pcie_enumeration_event(ioc, fw_event);
10810 		break;
10811 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10812 		_scsih_pcie_topology_change_event(ioc, fw_event);
10813 		break;
10814 	}
10815 out:
10816 	fw_event_work_put(fw_event);
10817 	ioc->current_event = NULL;
10818 }
10819 
10820 /**
10821  * _firmware_event_work
10822  * @work: The fw_event_work object
10823  * Context: user.
10824  *
10825  * wrappers for the work thread handling firmware events
10826  */
10827 
10828 static void
_firmware_event_work(struct work_struct * work)10829 _firmware_event_work(struct work_struct *work)
10830 {
10831 	struct fw_event_work *fw_event = container_of(work,
10832 	    struct fw_event_work, work);
10833 
10834 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
10835 }
10836 
10837 /**
10838  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10839  * @ioc: per adapter object
10840  * @msix_index: MSIX table index supplied by the OS
10841  * @reply: reply message frame(lower 32bit addr)
10842  * Context: interrupt.
10843  *
10844  * This function merely adds a new work task into ioc->firmware_event_thread.
10845  * The tasks are worked from _firmware_event_work in user context.
10846  *
10847  * Return: 1 meaning mf should be freed from _base_interrupt
10848  *         0 means the mf is freed from this function.
10849  */
10850 u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER * ioc,u8 msix_index,u32 reply)10851 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10852 	u32 reply)
10853 {
10854 	struct fw_event_work *fw_event;
10855 	Mpi2EventNotificationReply_t *mpi_reply;
10856 	u16 event;
10857 	u16 sz;
10858 	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10859 
10860 	/* events turned off due to host reset */
10861 	if (ioc->pci_error_recovery)
10862 		return 1;
10863 
10864 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10865 
10866 	if (unlikely(!mpi_reply)) {
10867 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10868 			__FILE__, __LINE__, __func__);
10869 		return 1;
10870 	}
10871 
10872 	event = le16_to_cpu(mpi_reply->Event);
10873 
10874 	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10875 		mpt3sas_trigger_event(ioc, event, 0);
10876 
10877 	switch (event) {
10878 	/* handle these */
10879 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10880 	{
10881 		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10882 		    (Mpi2EventDataSasBroadcastPrimitive_t *)
10883 		    mpi_reply->EventData;
10884 
10885 		if (baen_data->Primitive !=
10886 		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
10887 			return 1;
10888 
10889 		if (ioc->broadcast_aen_busy) {
10890 			ioc->broadcast_aen_pending++;
10891 			return 1;
10892 		} else
10893 			ioc->broadcast_aen_busy = 1;
10894 		break;
10895 	}
10896 
10897 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10898 		_scsih_check_topo_delete_events(ioc,
10899 		    (Mpi2EventDataSasTopologyChangeList_t *)
10900 		    mpi_reply->EventData);
10901 		/*
10902 		 * No need to add the topology change list
10903 		 * event to fw event work queue when
10904 		 * diag reset is going on. Since during diag
10905 		 * reset driver scan the devices by reading
10906 		 * sas device page0's not by processing the
10907 		 * events.
10908 		 */
10909 		if (ioc->shost_recovery)
10910 			return 1;
10911 		break;
10912 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10913 	_scsih_check_pcie_topo_remove_events(ioc,
10914 		    (Mpi26EventDataPCIeTopologyChangeList_t *)
10915 		    mpi_reply->EventData);
10916 		if (ioc->shost_recovery)
10917 			return 1;
10918 		break;
10919 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10920 		_scsih_check_ir_config_unhide_events(ioc,
10921 		    (Mpi2EventDataIrConfigChangeList_t *)
10922 		    mpi_reply->EventData);
10923 		break;
10924 	case MPI2_EVENT_IR_VOLUME:
10925 		_scsih_check_volume_delete_events(ioc,
10926 		    (Mpi2EventDataIrVolume_t *)
10927 		    mpi_reply->EventData);
10928 		break;
10929 	case MPI2_EVENT_LOG_ENTRY_ADDED:
10930 	{
10931 		Mpi2EventDataLogEntryAdded_t *log_entry;
10932 		u32 log_code;
10933 
10934 		if (!ioc->is_warpdrive)
10935 			break;
10936 
10937 		log_entry = (Mpi2EventDataLogEntryAdded_t *)
10938 		    mpi_reply->EventData;
10939 		log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);
10940 
10941 		if (le16_to_cpu(log_entry->LogEntryQualifier)
10942 		    != MPT2_WARPDRIVE_LOGENTRY)
10943 			break;
10944 
10945 		switch (log_code) {
10946 		case MPT2_WARPDRIVE_LC_SSDT:
10947 			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10948 			break;
10949 		case MPT2_WARPDRIVE_LC_SSDLW:
10950 			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10951 			break;
10952 		case MPT2_WARPDRIVE_LC_SSDLF:
10953 			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10954 			break;
10955 		case MPT2_WARPDRIVE_LC_BRMF:
10956 			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10957 			break;
10958 		}
10959 
10960 		break;
10961 	}
10962 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10963 		_scsih_sas_device_status_change_event(ioc,
10964 		    (Mpi2EventDataSasDeviceStatusChange_t *)
10965 		    mpi_reply->EventData);
10966 		break;
10967 	case MPI2_EVENT_IR_OPERATION_STATUS:
10968 	case MPI2_EVENT_SAS_DISCOVERY:
10969 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10970 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10971 	case MPI2_EVENT_IR_PHYSICAL_DISK:
10972 	case MPI2_EVENT_PCIE_ENUMERATION:
10973 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10974 		break;
10975 
10976 	case MPI2_EVENT_TEMP_THRESHOLD:
10977 		_scsih_temp_threshold_events(ioc,
10978 			(Mpi2EventDataTemperature_t *)
10979 			mpi_reply->EventData);
10980 		break;
10981 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10982 		ActiveCableEventData =
10983 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10984 		switch (ActiveCableEventData->ReasonCode) {
10985 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
10986 			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10987 				   ActiveCableEventData->ReceptacleID);
10988 			pr_notice("cannot be powered and devices connected\n");
10989 			pr_notice("to this active cable will not be seen\n");
10990 			pr_notice("This active cable requires %d mW of power\n",
10991 			    le32_to_cpu(
10992 			    ActiveCableEventData->ActiveCablePowerRequirement));
10993 			break;
10994 
10995 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
10996 			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
10997 				   ActiveCableEventData->ReceptacleID);
10998 			pr_notice(
10999 			    "is not running at optimal speed(12 Gb/s rate)\n");
11000 			break;
11001 		}
11002 
11003 		break;
11004 
11005 	default: /* ignore the rest */
11006 		return 1;
11007 	}
11008 
11009 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
11010 	fw_event = alloc_fw_event_work(sz);
11011 	if (!fw_event) {
11012 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
11013 			__FILE__, __LINE__, __func__);
11014 		return 1;
11015 	}
11016 
11017 	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
11018 	fw_event->ioc = ioc;
11019 	fw_event->VF_ID = mpi_reply->VF_ID;
11020 	fw_event->VP_ID = mpi_reply->VP_ID;
11021 	fw_event->event = event;
11022 	_scsih_fw_event_add(ioc, fw_event);
11023 	fw_event_work_put(fw_event);
11024 	return 1;
11025 }
11026 
11027 /**
11028  * _scsih_expander_node_remove - removing expander device from list.
11029  * @ioc: per adapter object
11030  * @sas_expander: the sas_device object
11031  *
11032  * Removing object and freeing associated memory from the
11033  * ioc->sas_expander_list.
11034  */
11035 static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)11036 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
11037 	struct _sas_node *sas_expander)
11038 {
11039 	struct _sas_port *mpt3sas_port, *next;
11040 	unsigned long flags;
11041 	int port_id;
11042 
11043 	/* remove sibling ports attached to this expander */
11044 	list_for_each_entry_safe(mpt3sas_port, next,
11045 	   &sas_expander->sas_port_list, port_list) {
11046 		if (ioc->shost_recovery)
11047 			return;
11048 		if (mpt3sas_port->remote_identify.device_type ==
11049 		    SAS_END_DEVICE)
11050 			mpt3sas_device_remove_by_sas_address(ioc,
11051 			    mpt3sas_port->remote_identify.sas_address,
11052 			    mpt3sas_port->hba_port);
11053 		else if (mpt3sas_port->remote_identify.device_type ==
11054 		    SAS_EDGE_EXPANDER_DEVICE ||
11055 		    mpt3sas_port->remote_identify.device_type ==
11056 		    SAS_FANOUT_EXPANDER_DEVICE)
11057 			mpt3sas_expander_remove(ioc,
11058 			    mpt3sas_port->remote_identify.sas_address,
11059 			    mpt3sas_port->hba_port);
11060 	}
11061 
11062 	port_id = sas_expander->port->port_id;
11063 
11064 	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
11065 	    sas_expander->sas_address_parent, sas_expander->port);
11066 
11067 	ioc_info(ioc,
11068 	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
11069 	    sas_expander->handle, (unsigned long long)
11070 	    sas_expander->sas_address,
11071 	    port_id);
11072 
11073 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
11074 	list_del(&sas_expander->list);
11075 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
11076 
11077 	kfree(sas_expander->phy);
11078 	kfree(sas_expander);
11079 }
11080 
11081 /**
11082  * _scsih_nvme_shutdown - NVMe shutdown notification
11083  * @ioc: per adapter object
11084  *
11085  * Sending IoUnitControl request with shutdown operation code to alert IOC that
11086  * the host system is shutting down so that IOC can issue NVMe shutdown to
11087  * NVMe drives attached to it.
11088  */
11089 static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER * ioc)11090 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
11091 {
11092 	Mpi26IoUnitControlRequest_t *mpi_request;
11093 	Mpi26IoUnitControlReply_t *mpi_reply;
11094 	u16 smid;
11095 
11096 	/* are there any NVMe devices ? */
11097 	if (list_empty(&ioc->pcie_device_list))
11098 		return;
11099 
11100 	mutex_lock(&ioc->scsih_cmds.mutex);
11101 
11102 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11103 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11104 		goto out;
11105 	}
11106 
11107 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11108 
11109 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11110 	if (!smid) {
11111 		ioc_err(ioc,
11112 		    "%s: failed obtaining a smid\n", __func__);
11113 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11114 		goto out;
11115 	}
11116 
11117 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11118 	ioc->scsih_cmds.smid = smid;
11119 	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
11120 	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
11121 	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
11122 
11123 	init_completion(&ioc->scsih_cmds.done);
11124 	ioc->put_smid_default(ioc, smid);
11125 	/* Wait for max_shutdown_latency seconds */
11126 	ioc_info(ioc,
11127 		"Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
11128 		ioc->max_shutdown_latency);
11129 	wait_for_completion_timeout(&ioc->scsih_cmds.done,
11130 			ioc->max_shutdown_latency*HZ);
11131 
11132 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11133 		ioc_err(ioc, "%s: timeout\n", __func__);
11134 		goto out;
11135 	}
11136 
11137 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11138 		mpi_reply = ioc->scsih_cmds.reply;
11139 		ioc_info(ioc, "Io Unit Control shutdown (complete):"
11140 			"ioc_status(0x%04x), loginfo(0x%08x)\n",
11141 			le16_to_cpu(mpi_reply->IOCStatus),
11142 			le32_to_cpu(mpi_reply->IOCLogInfo));
11143 	}
11144  out:
11145 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11146 	mutex_unlock(&ioc->scsih_cmds.mutex);
11147 }
11148 
11149 
11150 /**
11151  * _scsih_ir_shutdown - IR shutdown notification
11152  * @ioc: per adapter object
11153  *
11154  * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
11155  * the host system is shutting down.
11156  */
11157 static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER * ioc)11158 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
11159 {
11160 	Mpi2RaidActionRequest_t *mpi_request;
11161 	Mpi2RaidActionReply_t *mpi_reply;
11162 	u16 smid;
11163 
11164 	/* is IR firmware build loaded ? */
11165 	if (!ioc->ir_firmware)
11166 		return;
11167 
11168 	/* are there any volumes ? */
11169 	if (list_empty(&ioc->raid_device_list))
11170 		return;
11171 
11172 	mutex_lock(&ioc->scsih_cmds.mutex);
11173 
11174 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11175 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11176 		goto out;
11177 	}
11178 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11179 
11180 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11181 	if (!smid) {
11182 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
11183 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11184 		goto out;
11185 	}
11186 
11187 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11188 	ioc->scsih_cmds.smid = smid;
11189 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
11190 
11191 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
11192 	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
11193 
11194 	if (!ioc->hide_ir_msg)
11195 		ioc_info(ioc, "IR shutdown (sending)\n");
11196 	init_completion(&ioc->scsih_cmds.done);
11197 	ioc->put_smid_default(ioc, smid);
11198 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11199 
11200 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11201 		ioc_err(ioc, "%s: timeout\n", __func__);
11202 		goto out;
11203 	}
11204 
11205 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11206 		mpi_reply = ioc->scsih_cmds.reply;
11207 		if (!ioc->hide_ir_msg)
11208 			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11209 				 le16_to_cpu(mpi_reply->IOCStatus),
11210 				 le32_to_cpu(mpi_reply->IOCLogInfo));
11211 	}
11212 
11213  out:
11214 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11215 	mutex_unlock(&ioc->scsih_cmds.mutex);
11216 }
11217 
11218 /**
11219  * _scsih_get_shost_and_ioc - get shost and ioc
11220  *			and verify whether they are NULL or not
11221  * @pdev: PCI device struct
11222  * @shost: address of scsi host pointer
11223  * @ioc: address of HBA adapter pointer
11224  *
11225  * Return zero if *shost and *ioc are not NULL otherwise return error number.
11226  */
11227 static int
_scsih_get_shost_and_ioc(struct pci_dev * pdev,struct Scsi_Host ** shost,struct MPT3SAS_ADAPTER ** ioc)11228 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11229 	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11230 {
11231 	*shost = pci_get_drvdata(pdev);
11232 	if (*shost == NULL) {
11233 		dev_err(&pdev->dev, "pdev's driver data is null\n");
11234 		return -ENXIO;
11235 	}
11236 
11237 	*ioc = shost_priv(*shost);
11238 	if (*ioc == NULL) {
11239 		dev_err(&pdev->dev, "shost's private data is null\n");
11240 		return -ENXIO;
11241 	}
11242 
11243 	return 0;
11244 }
11245 
11246 /**
11247  * scsih_remove - detach and remove add host
11248  * @pdev: PCI device struct
11249  *
11250  * Routine called when unloading the driver.
11251  */
scsih_remove(struct pci_dev * pdev)11252 static void scsih_remove(struct pci_dev *pdev)
11253 {
11254 	struct Scsi_Host *shost;
11255 	struct MPT3SAS_ADAPTER *ioc;
11256 	struct _sas_port *mpt3sas_port, *next_port;
11257 	struct _raid_device *raid_device, *next;
11258 	struct MPT3SAS_TARGET *sas_target_priv_data;
11259 	struct _pcie_device *pcie_device, *pcienext;
11260 	struct workqueue_struct	*wq;
11261 	unsigned long flags;
11262 	Mpi2ConfigReply_t mpi_reply;
11263 	struct hba_port *port, *port_next;
11264 
11265 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11266 		return;
11267 
11268 	ioc->remove_host = 1;
11269 
11270 	if (!pci_device_is_present(pdev)) {
11271 		mpt3sas_base_pause_mq_polling(ioc);
11272 		_scsih_flush_running_cmds(ioc);
11273 	}
11274 
11275 	_scsih_fw_event_cleanup_queue(ioc);
11276 
11277 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
11278 	wq = ioc->firmware_event_thread;
11279 	ioc->firmware_event_thread = NULL;
11280 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11281 	if (wq)
11282 		destroy_workqueue(wq);
11283 	/*
11284 	 * Copy back the unmodified ioc page1. so that on next driver load,
11285 	 * current modified changes on ioc page1 won't take effect.
11286 	 */
11287 	if (ioc->is_aero_ioc)
11288 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11289 				&ioc->ioc_pg1_copy);
11290 	/* release all the volumes */
11291 	_scsih_ir_shutdown(ioc);
11292 	mpt3sas_destroy_debugfs(ioc);
11293 	sas_remove_host(shost);
11294 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11295 	    list) {
11296 		if (raid_device->starget) {
11297 			sas_target_priv_data =
11298 			    raid_device->starget->hostdata;
11299 			sas_target_priv_data->deleted = 1;
11300 			scsi_remove_target(&raid_device->starget->dev);
11301 		}
11302 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11303 			 raid_device->handle, (u64)raid_device->wwid);
11304 		_scsih_raid_device_remove(ioc, raid_device);
11305 	}
11306 	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11307 		list) {
11308 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11309 		list_del_init(&pcie_device->list);
11310 		pcie_device_put(pcie_device);
11311 	}
11312 
11313 	/* free ports attached to the sas_host */
11314 	list_for_each_entry_safe(mpt3sas_port, next_port,
11315 	   &ioc->sas_hba.sas_port_list, port_list) {
11316 		if (mpt3sas_port->remote_identify.device_type ==
11317 		    SAS_END_DEVICE)
11318 			mpt3sas_device_remove_by_sas_address(ioc,
11319 			    mpt3sas_port->remote_identify.sas_address,
11320 			    mpt3sas_port->hba_port);
11321 		else if (mpt3sas_port->remote_identify.device_type ==
11322 		    SAS_EDGE_EXPANDER_DEVICE ||
11323 		    mpt3sas_port->remote_identify.device_type ==
11324 		    SAS_FANOUT_EXPANDER_DEVICE)
11325 			mpt3sas_expander_remove(ioc,
11326 			    mpt3sas_port->remote_identify.sas_address,
11327 			    mpt3sas_port->hba_port);
11328 	}
11329 
11330 	list_for_each_entry_safe(port, port_next,
11331 	    &ioc->port_table_list, list) {
11332 		list_del(&port->list);
11333 		kfree(port);
11334 	}
11335 
11336 	/* free phys attached to the sas_host */
11337 	if (ioc->sas_hba.num_phys) {
11338 		kfree(ioc->sas_hba.phy);
11339 		ioc->sas_hba.phy = NULL;
11340 		ioc->sas_hba.num_phys = 0;
11341 	}
11342 
11343 	mpt3sas_base_detach(ioc);
11344 	mpt3sas_ctl_release(ioc);
11345 	spin_lock(&gioc_lock);
11346 	list_del(&ioc->list);
11347 	spin_unlock(&gioc_lock);
11348 	scsi_host_put(shost);
11349 }
11350 
11351 /**
11352  * scsih_shutdown - routine call during system shutdown
11353  * @pdev: PCI device struct
11354  */
11355 static void
scsih_shutdown(struct pci_dev * pdev)11356 scsih_shutdown(struct pci_dev *pdev)
11357 {
11358 	struct Scsi_Host *shost;
11359 	struct MPT3SAS_ADAPTER *ioc;
11360 	struct workqueue_struct	*wq;
11361 	unsigned long flags;
11362 	Mpi2ConfigReply_t mpi_reply;
11363 
11364 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11365 		return;
11366 
11367 	ioc->remove_host = 1;
11368 
11369 	if (!pci_device_is_present(pdev)) {
11370 		mpt3sas_base_pause_mq_polling(ioc);
11371 		_scsih_flush_running_cmds(ioc);
11372 	}
11373 
11374 	_scsih_fw_event_cleanup_queue(ioc);
11375 
11376 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
11377 	wq = ioc->firmware_event_thread;
11378 	ioc->firmware_event_thread = NULL;
11379 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11380 	if (wq)
11381 		destroy_workqueue(wq);
11382 	/*
11383 	 * Copy back the unmodified ioc page1 so that on next driver load,
11384 	 * current modified changes on ioc page1 won't take effect.
11385 	 */
11386 	if (ioc->is_aero_ioc)
11387 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11388 				&ioc->ioc_pg1_copy);
11389 
11390 	_scsih_ir_shutdown(ioc);
11391 	_scsih_nvme_shutdown(ioc);
11392 	mpt3sas_base_mask_interrupts(ioc);
11393 	mpt3sas_base_stop_watchdog(ioc);
11394 	ioc->shost_recovery = 1;
11395 	mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
11396 	ioc->shost_recovery = 0;
11397 	mpt3sas_base_free_irq(ioc);
11398 	mpt3sas_base_disable_msix(ioc);
11399 }
11400 
11401 
11402 /**
11403  * _scsih_probe_boot_devices - reports 1st device
11404  * @ioc: per adapter object
11405  *
11406  * If specified in bios page 2, this routine reports the 1st
11407  * device scsi-ml or sas transport for persistent boot device
11408  * purposes.  Please refer to function _scsih_determine_boot_device()
11409  */
11410 static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER * ioc)11411 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11412 {
11413 	u32 channel;
11414 	void *device;
11415 	struct _sas_device *sas_device;
11416 	struct _raid_device *raid_device;
11417 	struct _pcie_device *pcie_device;
11418 	u16 handle;
11419 	u64 sas_address_parent;
11420 	u64 sas_address;
11421 	unsigned long flags;
11422 	int rc;
11423 	int tid;
11424 	struct hba_port *port;
11425 
11426 	 /* no Bios, return immediately */
11427 	if (!ioc->bios_pg3.BiosVersion)
11428 		return;
11429 
11430 	device = NULL;
11431 	if (ioc->req_boot_device.device) {
11432 		device =  ioc->req_boot_device.device;
11433 		channel = ioc->req_boot_device.channel;
11434 	} else if (ioc->req_alt_boot_device.device) {
11435 		device =  ioc->req_alt_boot_device.device;
11436 		channel = ioc->req_alt_boot_device.channel;
11437 	} else if (ioc->current_boot_device.device) {
11438 		device =  ioc->current_boot_device.device;
11439 		channel = ioc->current_boot_device.channel;
11440 	}
11441 
11442 	if (!device)
11443 		return;
11444 
11445 	if (channel == RAID_CHANNEL) {
11446 		raid_device = device;
11447 		/*
11448 		 * If this boot vd is already registered with SML then
11449 		 * no need to register it again as part of device scanning
11450 		 * after diag reset during driver load operation.
11451 		 */
11452 		if (raid_device->starget)
11453 			return;
11454 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11455 		    raid_device->id, 0);
11456 		if (rc)
11457 			_scsih_raid_device_remove(ioc, raid_device);
11458 	} else if (channel == PCIE_CHANNEL) {
11459 		pcie_device = device;
11460 		/*
11461 		 * If this boot NVMe device is already registered with SML then
11462 		 * no need to register it again as part of device scanning
11463 		 * after diag reset during driver load operation.
11464 		 */
11465 		if (pcie_device->starget)
11466 			return;
11467 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11468 		tid = pcie_device->id;
11469 		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11470 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11471 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11472 		if (rc)
11473 			_scsih_pcie_device_remove(ioc, pcie_device);
11474 	} else {
11475 		sas_device = device;
11476 		/*
11477 		 * If this boot sas/sata device is already registered with SML
11478 		 * then no need to register it again as part of device scanning
11479 		 * after diag reset during driver load operation.
11480 		 */
11481 		if (sas_device->starget)
11482 			return;
11483 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
11484 		handle = sas_device->handle;
11485 		sas_address_parent = sas_device->sas_address_parent;
11486 		sas_address = sas_device->sas_address;
11487 		port = sas_device->port;
11488 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
11489 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11490 
11491 		if (ioc->hide_drives)
11492 			return;
11493 
11494 		if (!port)
11495 			return;
11496 
11497 		if (!mpt3sas_transport_port_add(ioc, handle,
11498 		    sas_address_parent, port)) {
11499 			_scsih_sas_device_remove(ioc, sas_device);
11500 		} else if (!sas_device->starget) {
11501 			if (!ioc->is_driver_loading) {
11502 				mpt3sas_transport_port_remove(ioc,
11503 				    sas_address,
11504 				    sas_address_parent, port);
11505 				_scsih_sas_device_remove(ioc, sas_device);
11506 			}
11507 		}
11508 	}
11509 }
11510 
11511 /**
11512  * _scsih_probe_raid - reporting raid volumes to scsi-ml
11513  * @ioc: per adapter object
11514  *
11515  * Called during initial loading of the driver.
11516  */
11517 static void
_scsih_probe_raid(struct MPT3SAS_ADAPTER * ioc)11518 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11519 {
11520 	struct _raid_device *raid_device, *raid_next;
11521 	int rc;
11522 
11523 	list_for_each_entry_safe(raid_device, raid_next,
11524 	    &ioc->raid_device_list, list) {
11525 		if (raid_device->starget)
11526 			continue;
11527 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11528 		    raid_device->id, 0);
11529 		if (rc)
11530 			_scsih_raid_device_remove(ioc, raid_device);
11531 	}
11532 }
11533 
get_next_sas_device(struct MPT3SAS_ADAPTER * ioc)11534 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11535 {
11536 	struct _sas_device *sas_device = NULL;
11537 	unsigned long flags;
11538 
11539 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
11540 	if (!list_empty(&ioc->sas_device_init_list)) {
11541 		sas_device = list_first_entry(&ioc->sas_device_init_list,
11542 				struct _sas_device, list);
11543 		sas_device_get(sas_device);
11544 	}
11545 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11546 
11547 	return sas_device;
11548 }
11549 
sas_device_make_active(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)11550 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11551 		struct _sas_device *sas_device)
11552 {
11553 	unsigned long flags;
11554 
11555 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
11556 
11557 	/*
11558 	 * Since we dropped the lock during the call to port_add(), we need to
11559 	 * be careful here that somebody else didn't move or delete this item
11560 	 * while we were busy with other things.
11561 	 *
11562 	 * If it was on the list, we need a put() for the reference the list
11563 	 * had. Either way, we need a get() for the destination list.
11564 	 */
11565 	if (!list_empty(&sas_device->list)) {
11566 		list_del_init(&sas_device->list);
11567 		sas_device_put(sas_device);
11568 	}
11569 
11570 	sas_device_get(sas_device);
11571 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
11572 
11573 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11574 }
11575 
11576 /**
11577  * _scsih_probe_sas - reporting sas devices to sas transport
11578  * @ioc: per adapter object
11579  *
11580  * Called during initial loading of the driver.
11581  */
11582 static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER * ioc)11583 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11584 {
11585 	struct _sas_device *sas_device;
11586 
11587 	if (ioc->hide_drives)
11588 		return;
11589 
11590 	while ((sas_device = get_next_sas_device(ioc))) {
11591 		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11592 		    sas_device->sas_address_parent, sas_device->port)) {
11593 			_scsih_sas_device_remove(ioc, sas_device);
11594 			sas_device_put(sas_device);
11595 			continue;
11596 		} else if (!sas_device->starget) {
11597 			/*
11598 			 * When asyn scanning is enabled, its not possible to
11599 			 * remove devices while scanning is turned on due to an
11600 			 * oops in scsi_sysfs_add_sdev()->add_device()->
11601 			 * sysfs_addrm_start()
11602 			 */
11603 			if (!ioc->is_driver_loading) {
11604 				mpt3sas_transport_port_remove(ioc,
11605 				    sas_device->sas_address,
11606 				    sas_device->sas_address_parent,
11607 				    sas_device->port);
11608 				_scsih_sas_device_remove(ioc, sas_device);
11609 				sas_device_put(sas_device);
11610 				continue;
11611 			}
11612 		}
11613 		sas_device_make_active(ioc, sas_device);
11614 		sas_device_put(sas_device);
11615 	}
11616 }
11617 
11618 /**
11619  * get_next_pcie_device - Get the next pcie device
11620  * @ioc: per adapter object
11621  *
11622  * Get the next pcie device from pcie_device_init_list list.
11623  *
11624  * Return: pcie device structure if pcie_device_init_list list is not empty
11625  * otherwise returns NULL
11626  */
get_next_pcie_device(struct MPT3SAS_ADAPTER * ioc)11627 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11628 {
11629 	struct _pcie_device *pcie_device = NULL;
11630 	unsigned long flags;
11631 
11632 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11633 	if (!list_empty(&ioc->pcie_device_init_list)) {
11634 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11635 				struct _pcie_device, list);
11636 		pcie_device_get(pcie_device);
11637 	}
11638 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11639 
11640 	return pcie_device;
11641 }
11642 
11643 /**
11644  * pcie_device_make_active - Add pcie device to pcie_device_list list
11645  * @ioc: per adapter object
11646  * @pcie_device: pcie device object
11647  *
11648  * Add the pcie device which has registered with SCSI Transport Later to
11649  * pcie_device_list list
11650  */
static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
		struct _pcie_device *pcie_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);

	/* If the device is still linked on a list (normally the init list),
	 * unlink it and drop the reference that list was holding.
	 */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}
	/* Take a fresh reference on behalf of pcie_device_list. */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
11667 
11668 /**
11669  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11670  * @ioc: per adapter object
11671  *
11672  * Called during initial loading of the driver.
11673  */
11674 static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER * ioc)11675 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11676 {
11677 	struct _pcie_device *pcie_device;
11678 	int rc;
11679 
11680 	/* PCIe Device List */
11681 	while ((pcie_device = get_next_pcie_device(ioc))) {
11682 		if (pcie_device->starget) {
11683 			pcie_device_put(pcie_device);
11684 			continue;
11685 		}
11686 		if (pcie_device->access_status ==
11687 		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11688 			pcie_device_make_active(ioc, pcie_device);
11689 			pcie_device_put(pcie_device);
11690 			continue;
11691 		}
11692 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11693 			pcie_device->id, 0);
11694 		if (rc) {
11695 			_scsih_pcie_device_remove(ioc, pcie_device);
11696 			pcie_device_put(pcie_device);
11697 			continue;
11698 		} else if (!pcie_device->starget) {
11699 			/*
11700 			 * When async scanning is enabled, its not possible to
11701 			 * remove devices while scanning is turned on due to an
11702 			 * oops in scsi_sysfs_add_sdev()->add_device()->
11703 			 * sysfs_addrm_start()
11704 			 */
11705 			if (!ioc->is_driver_loading) {
11706 			/* TODO-- Need to find out whether this condition will
11707 			 * occur or not
11708 			 */
11709 				_scsih_pcie_device_remove(ioc, pcie_device);
11710 				pcie_device_put(pcie_device);
11711 				continue;
11712 			}
11713 		}
11714 		pcie_device_make_active(ioc, pcie_device);
11715 		pcie_device_put(pcie_device);
11716 	}
11717 }
11718 
11719 /**
11720  * _scsih_probe_devices - probing for devices
11721  * @ioc: per adapter object
11722  *
11723  * Called during initial loading of the driver.
11724  */
11725 static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER * ioc)11726 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11727 {
11728 	u16 volume_mapping_flags;
11729 
11730 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11731 		return;  /* return when IOC doesn't support initiator mode */
11732 
11733 	_scsih_probe_boot_devices(ioc);
11734 
11735 	if (ioc->ir_firmware) {
11736 		volume_mapping_flags =
11737 		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11738 		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11739 		if (volume_mapping_flags ==
11740 		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11741 			_scsih_probe_raid(ioc);
11742 			_scsih_probe_sas(ioc);
11743 		} else {
11744 			_scsih_probe_sas(ioc);
11745 			_scsih_probe_raid(ioc);
11746 		}
11747 	} else {
11748 		_scsih_probe_sas(ioc);
11749 		_scsih_probe_pcie(ioc);
11750 	}
11751 }
11752 
11753 /**
11754  * scsih_scan_start - scsi lld callback for .scan_start
11755  * @shost: SCSI host pointer
11756  *
11757  * The shost has the ability to discover targets on its own instead
11758  * of scanning the entire bus.  In our implemention, we will kick off
11759  * firmware discovery.
11760  */
11761 static void
scsih_scan_start(struct Scsi_Host * shost)11762 scsih_scan_start(struct Scsi_Host *shost)
11763 {
11764 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11765 	int rc;
11766 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11767 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11768 	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11769 		mpt3sas_enable_diag_buffer(ioc, 1);
11770 
11771 	if (disable_discovery > 0)
11772 		return;
11773 
11774 	ioc->start_scan = 1;
11775 	rc = mpt3sas_port_enable(ioc);
11776 
11777 	if (rc != 0)
11778 		ioc_info(ioc, "port enable: FAILED\n");
11779 }
11780 
11781 /**
11782  * _scsih_complete_devices_scanning - add the devices to sml and
11783  * complete ioc initialization.
11784  * @ioc: per adapter object
11785  *
11786  * Return nothing.
11787  */
static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
{

	if (ioc->wait_for_discovery_to_complete) {
		ioc->wait_for_discovery_to_complete = 0;
		/* Report every device found during port enable to scsi-ml. */
		_scsih_probe_devices(ioc);
	}

	mpt3sas_base_start_watchdog(ioc);
	/* Driver load is considered complete from this point on. */
	ioc->is_driver_loading = 0;
}
11799 
11800 /**
11801  * scsih_scan_finished - scsi lld callback for .scan_finished
11802  * @shost: SCSI host pointer
11803  * @time: elapsed time of the scan in jiffies
11804  *
11805  * This function will be called periodicallyn until it returns 1 with the
11806  * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
11807  * we wait for firmware discovery to complete, then return 1.
11808  */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	u32 ioc_state;
	int issue_hard_reset = 0;

	/* Discovery disabled by module parameter: declare the scan done. */
	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* Bound the wait for port enable to 300 seconds. */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/* Port enable still outstanding: bail out to fault/coredump
	 * handling if the IOC died, otherwise ask the midlayer to poll
	 * again (return 0).
	 */
	if (ioc->start_scan) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			issue_hard_reset = 1;
			goto out;
		} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
				MPI2_IOC_STATE_COREDUMP) {
			mpt3sas_base_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
			issue_hard_reset = 1;
			goto out;
		}
		return 0;
	}

	/* Port enable was aborted by a diag reset. */
	if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
		ioc_info(ioc,
		    "port enable: aborted due to diag reset\n");
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	/* Port enable completed but firmware reported a failing status;
	 * schedule host removal.
	 */
	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
			 ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	/* Hand the discovered devices to the SCSI midlayer. */
	_scsih_complete_devices_scanning(ioc);

out:
	if (issue_hard_reset) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
			ioc->is_driver_loading = 0;
	}
	return 1;
}
11874 
11875 /**
11876  * scsih_map_queues - map reply queues with request queues
11877  * @shost: SCSI host pointer
11878  */
static void scsih_map_queues(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc =
	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
	struct blk_mq_queue_map *map;
	int i, qoff, offset;
	/* Reply queues below iopoll_q_start_index are MSI-X backed; the
	 * remainder are poll queues with no interrupt.
	 */
	int nr_msix_vectors = ioc->iopoll_q_start_index;
	int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;

	/* Single hardware queue: nothing to map. */
	if (shost->nr_hw_queues == 1)
		return;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		map = &shost->tag_set.map[i];
		map->nr_queues = 0;
		offset = 0;
		if (i == HCTX_TYPE_DEFAULT) {
			/* The high-iops queues are not part of the default
			 * map; skip over them via the PCI vector offset.
			 */
			map->nr_queues =
			    nr_msix_vectors - ioc->high_iops_queues;
			offset = ioc->high_iops_queues;
		} else if (i == HCTX_TYPE_POLL)
			map->nr_queues = iopoll_q_count;

		/* The default map must never be empty. */
		if (!map->nr_queues)
			BUG_ON(i == HCTX_TYPE_DEFAULT);

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, ioc->pdev, offset);
		else
			blk_mq_map_queues(map);

		qoff += map->nr_queues;
	}
}
11918 
/* shost template for SAS 2.0 HBA devices */
static const struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.device_configure		= scsih_device_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* NOTE(review): can_queue of 1 looks like a placeholder - presumably
	 * raised once IOC facts are known; confirm where it is updated.
	 */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};
11948 
/* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	/* cookie ties these raid attributes to the SAS 2.0 host template */
	.cookie		= &mpt2sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
11956 
/* shost template for SAS 3.0 HBA devices */
static const struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.device_configure		= scsih_device_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* NOTE(review): can_queue of 1 looks like a placeholder - presumably
	 * raised once IOC facts are known; confirm where it is updated.
	 */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.max_segment_size		= 0xffffffff,
	.cmd_per_lun			= 128,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
	/* blk-mq multiqueue + polled-I/O support (SAS 3.0 template only) */
	.map_queues			= scsih_map_queues,
	.mq_poll			= mpt3sas_blk_mq_poll,
};
11989 
/* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	/* cookie ties these raid attributes to the SAS 3.0 host template */
	.cookie		= &mpt3sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
11997 
11998 /**
11999  * _scsih_determine_hba_mpi_version - determine in which MPI version class
12000  *					this device belongs to.
12001  * @pdev: PCI device struct
12002  *
12003  * return MPI2_VERSION for SAS 2.0 HBA devices,
12004  *	MPI25_VERSION for SAS 3.0 HBA devices, and
12005  *	MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
12006  */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{

	switch (pdev->device) {
	/* SAS 2.0 generation controllers (and the SSS6200 WarpDrive,
	 * plus the MPI endpoint switches) -> MPI 2.0
	 */
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	/* SAS 3.0 generation controllers -> MPI 2.5 */
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	/* Later SAS 3.x generation controllers (Cutlass/Invader and
	 * newer, including secure/invalid-flash 38xx/39xx IDs) -> MPI 2.6
	 */
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	/* Unknown device id: not claimed by this driver. */
	return 0;
}
12069 
12070 /**
12071  * _scsih_probe - attach and add scsi host
12072  * @pdev: PCI device struct
12073  * @id: pci device id
12074  *
12075  * Return: 0 success, anything else error.
12076  */
12077 static int
_scsih_probe(struct pci_dev * pdev,const struct pci_device_id * id)12078 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
12079 {
12080 	struct MPT3SAS_ADAPTER *ioc;
12081 	struct Scsi_Host *shost = NULL;
12082 	int rv;
12083 	u16 hba_mpi_version;
12084 	int iopoll_q_count = 0;
12085 
12086 	/* Determine in which MPI version class this pci device belongs */
12087 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
12088 	if (hba_mpi_version == 0)
12089 		return -ENODEV;
12090 
12091 	/* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
12092 	 * for other generation HBA's return with -ENODEV
12093 	 */
12094 	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
12095 		return -ENODEV;
12096 
12097 	/* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
12098 	 * for other generation HBA's return with -ENODEV
12099 	 */
12100 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
12101 		|| hba_mpi_version ==  MPI26_VERSION)))
12102 		return -ENODEV;
12103 
12104 	switch (hba_mpi_version) {
12105 	case MPI2_VERSION:
12106 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
12107 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
12108 		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
12109 		shost = scsi_host_alloc(&mpt2sas_driver_template,
12110 		  sizeof(struct MPT3SAS_ADAPTER));
12111 		if (!shost)
12112 			return -ENODEV;
12113 		ioc = shost_priv(shost);
12114 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12115 		ioc->hba_mpi_version_belonged = hba_mpi_version;
12116 		ioc->id = mpt2_ids++;
12117 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
12118 		switch (pdev->device) {
12119 		case MPI2_MFGPAGE_DEVID_SSS6200:
12120 			ioc->is_warpdrive = 1;
12121 			ioc->hide_ir_msg = 1;
12122 			break;
12123 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12124 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12125 			ioc->is_mcpu_endpoint = 1;
12126 			break;
12127 		default:
12128 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
12129 			break;
12130 		}
12131 
12132 		if (multipath_on_hba == -1 || multipath_on_hba == 0)
12133 			ioc->multipath_on_hba = 0;
12134 		else
12135 			ioc->multipath_on_hba = 1;
12136 
12137 		break;
12138 	case MPI25_VERSION:
12139 	case MPI26_VERSION:
12140 		/* Use mpt3sas driver host template for SAS 3.0 HBA's */
12141 		shost = scsi_host_alloc(&mpt3sas_driver_template,
12142 		  sizeof(struct MPT3SAS_ADAPTER));
12143 		if (!shost)
12144 			return -ENODEV;
12145 		ioc = shost_priv(shost);
12146 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12147 		ioc->hba_mpi_version_belonged = hba_mpi_version;
12148 		ioc->id = mpt3_ids++;
12149 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
12150 		switch (pdev->device) {
12151 		case MPI26_MFGPAGE_DEVID_SAS3508:
12152 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
12153 		case MPI26_MFGPAGE_DEVID_SAS3408:
12154 		case MPI26_MFGPAGE_DEVID_SAS3516:
12155 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
12156 		case MPI26_MFGPAGE_DEVID_SAS3416:
12157 		case MPI26_MFGPAGE_DEVID_SAS3616:
12158 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12159 			ioc->is_gen35_ioc = 1;
12160 			break;
12161 		case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12162 		case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12163 			dev_err(&pdev->dev,
12164 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
12165 			    pdev->device, pdev->subsystem_vendor,
12166 			    pdev->subsystem_device);
12167 			return 1;
12168 		case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12169 		case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12170 			dev_err(&pdev->dev,
12171 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
12172 			    pdev->device, pdev->subsystem_vendor,
12173 			    pdev->subsystem_device);
12174 			return 1;
12175 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12176 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12177 			dev_info(&pdev->dev,
12178 			    "HBA is in Configurable Secure mode\n");
12179 			fallthrough;
12180 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12181 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12182 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
12183 			break;
12184 		default:
12185 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
12186 		}
12187 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
12188 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
12189 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
12190 			ioc->combined_reply_queue = 1;
12191 			if (ioc->is_gen35_ioc)
12192 				ioc->combined_reply_index_count =
12193 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
12194 			else
12195 				ioc->combined_reply_index_count =
12196 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
12197 		}
12198 
12199 		switch (ioc->is_gen35_ioc) {
12200 		case 0:
12201 			if (multipath_on_hba == -1 || multipath_on_hba == 0)
12202 				ioc->multipath_on_hba = 0;
12203 			else
12204 				ioc->multipath_on_hba = 1;
12205 			break;
12206 		case 1:
12207 			if (multipath_on_hba == -1 || multipath_on_hba > 0)
12208 				ioc->multipath_on_hba = 1;
12209 			else
12210 				ioc->multipath_on_hba = 0;
12211 			break;
12212 		default:
12213 			break;
12214 		}
12215 
12216 		break;
12217 	default:
12218 		return -ENODEV;
12219 	}
12220 
12221 	INIT_LIST_HEAD(&ioc->list);
12222 	spin_lock(&gioc_lock);
12223 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
12224 	spin_unlock(&gioc_lock);
12225 	ioc->shost = shost;
12226 	ioc->pdev = pdev;
12227 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
12228 	ioc->tm_cb_idx = tm_cb_idx;
12229 	ioc->ctl_cb_idx = ctl_cb_idx;
12230 	ioc->base_cb_idx = base_cb_idx;
12231 	ioc->port_enable_cb_idx = port_enable_cb_idx;
12232 	ioc->transport_cb_idx = transport_cb_idx;
12233 	ioc->scsih_cb_idx = scsih_cb_idx;
12234 	ioc->config_cb_idx = config_cb_idx;
12235 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
12236 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
12237 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
12238 	ioc->logging_level = logging_level;
12239 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
12240 	/* Host waits for minimum of six seconds */
12241 	ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
12242 	/*
12243 	 * Enable MEMORY MOVE support flag.
12244 	 */
12245 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
12246 	/* Enable ADDITIONAL QUERY support flag. */
12247 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
12248 
12249 	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
12250 
12251 	/* misc semaphores and spin locks */
12252 	mutex_init(&ioc->reset_in_progress_mutex);
12253 	mutex_init(&ioc->hostdiag_unlock_mutex);
12254 	/* initializing pci_access_mutex lock */
12255 	mutex_init(&ioc->pci_access_mutex);
12256 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
12257 	spin_lock_init(&ioc->scsi_lookup_lock);
12258 	spin_lock_init(&ioc->sas_device_lock);
12259 	spin_lock_init(&ioc->sas_node_lock);
12260 	spin_lock_init(&ioc->fw_event_lock);
12261 	spin_lock_init(&ioc->raid_device_lock);
12262 	spin_lock_init(&ioc->pcie_device_lock);
12263 	spin_lock_init(&ioc->diag_trigger_lock);
12264 
12265 	INIT_LIST_HEAD(&ioc->sas_device_list);
12266 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
12267 	INIT_LIST_HEAD(&ioc->sas_expander_list);
12268 	INIT_LIST_HEAD(&ioc->enclosure_list);
12269 	INIT_LIST_HEAD(&ioc->pcie_device_list);
12270 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
12271 	INIT_LIST_HEAD(&ioc->fw_event_list);
12272 	INIT_LIST_HEAD(&ioc->raid_device_list);
12273 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
12274 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
12275 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
12276 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
12277 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12278 	INIT_LIST_HEAD(&ioc->reply_queue_list);
12279 	INIT_LIST_HEAD(&ioc->port_table_list);
12280 
12281 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12282 
12283 	/* init shost parameters */
12284 	shost->max_cmd_len = 32;
12285 	shost->max_lun = max_lun;
12286 	shost->transportt = mpt3sas_transport_template;
12287 	shost->unique_id = ioc->id;
12288 
12289 	if (ioc->is_mcpu_endpoint) {
12290 		/* mCPU MPI support 64K max IO */
12291 		shost->max_sectors = 128;
12292 		ioc_info(ioc, "The max_sectors value is set to %d\n",
12293 			 shost->max_sectors);
12294 	} else {
12295 		if (max_sectors != 0xFFFF) {
12296 			if (max_sectors < 64) {
12297 				shost->max_sectors = 64;
12298 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12299 					 max_sectors);
12300 			} else if (max_sectors > 32767) {
12301 				shost->max_sectors = 32767;
12302 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12303 					 max_sectors);
12304 			} else {
12305 				shost->max_sectors = max_sectors & 0xFFFE;
12306 				ioc_info(ioc, "The max_sectors value is set to %d\n",
12307 					 shost->max_sectors);
12308 			}
12309 		}
12310 	}
12311 	/* register EEDP capabilities with SCSI layer */
12312 	if (prot_mask >= 0)
12313 		scsi_host_set_prot(shost, (prot_mask & 0x07));
12314 	else
12315 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12316 				   | SHOST_DIF_TYPE2_PROTECTION
12317 				   | SHOST_DIF_TYPE3_PROTECTION);
12318 
12319 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12320 
12321 	/* event thread */
12322 	ioc->firmware_event_thread = alloc_ordered_workqueue(
12323 		"fw_event_%s%d", 0, ioc->driver_name, ioc->id);
12324 	if (!ioc->firmware_event_thread) {
12325 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12326 			__FILE__, __LINE__, __func__);
12327 		rv = -ENODEV;
12328 		goto out_thread_fail;
12329 	}
12330 
12331 	shost->host_tagset = 0;
12332 
12333 	if (ioc->is_gen35_ioc && host_tagset_enable)
12334 		shost->host_tagset = 1;
12335 
12336 	ioc->is_driver_loading = 1;
12337 	if ((mpt3sas_base_attach(ioc))) {
12338 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12339 			__FILE__, __LINE__, __func__);
12340 		rv = -ENODEV;
12341 		goto out_attach_fail;
12342 	}
12343 
12344 	if (ioc->is_warpdrive) {
12345 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
12346 			ioc->hide_drives = 0;
12347 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
12348 			ioc->hide_drives = 1;
12349 		else {
12350 			if (mpt3sas_get_num_volumes(ioc))
12351 				ioc->hide_drives = 1;
12352 			else
12353 				ioc->hide_drives = 0;
12354 		}
12355 	} else
12356 		ioc->hide_drives = 0;
12357 
12358 	shost->nr_hw_queues = 1;
12359 
12360 	if (shost->host_tagset) {
12361 		shost->nr_hw_queues =
12362 		    ioc->reply_queue_count - ioc->high_iops_queues;
12363 
12364 		iopoll_q_count =
12365 		    ioc->reply_queue_count - ioc->iopoll_q_start_index;
12366 
12367 		shost->nr_maps = iopoll_q_count ? 3 : 1;
12368 
12369 		dev_info(&ioc->pdev->dev,
12370 		    "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12371 		    shost->can_queue, shost->nr_hw_queues);
12372 	}
12373 
12374 	rv = scsi_add_host(shost, &pdev->dev);
12375 	if (rv) {
12376 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12377 			__FILE__, __LINE__, __func__);
12378 		goto out_add_shost_fail;
12379 	}
12380 
12381 	scsi_scan_host(shost);
12382 	mpt3sas_setup_debugfs(ioc);
12383 	return 0;
12384 out_add_shost_fail:
12385 	mpt3sas_base_detach(ioc);
12386  out_attach_fail:
12387 	destroy_workqueue(ioc->firmware_event_thread);
12388  out_thread_fail:
12389 	spin_lock(&gioc_lock);
12390 	list_del(&ioc->list);
12391 	spin_unlock(&gioc_lock);
12392 	scsi_host_put(shost);
12393 	return rv;
12394 }
12395 
12396 /**
12397  * scsih_suspend - power management suspend main entry point
12398  * @dev: Device struct
12399  *
12400  * Return: 0 success, anything else error.
12401  */
static int __maybe_unused
scsih_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	int rc;

	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (rc)
		return rc;

	/* Quiesce: stop the watchdog, block new I/O from the midlayer and
	 * shut down NVMe devices before releasing controller resources.
	 */
	mpt3sas_base_stop_watchdog(ioc);
	scsi_block_requests(shost);
	_scsih_nvme_shutdown(ioc);
	/* NOTE(review): message says "entering operating state" but this is
	 * the suspend path - presumably meant the opposite; confirm intent.
	 */
	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
		 pdev, pci_name(pdev));

	mpt3sas_base_free_resources(ioc);
	return 0;
}
12423 
12424 /**
12425  * scsih_resume - power management resume main entry point
12426  * @dev: Device struct
12427  *
12428  * Return: 0 success, anything else error.
12429  */
static int __maybe_unused
scsih_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (r)
		return r;

	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
		 pdev, pci_name(pdev), device_state);

	/* Re-map resources freed at suspend, then reset the IOC so the
	 * firmware is reinitialized before I/O is unblocked.
	 */
	ioc->pdev = pdev;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		return r;
	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
	scsi_unblock_requests(shost);
	mpt3sas_base_start_watchdog(ioc);
	return 0;
}
12456 
12457 /**
12458  * scsih_pci_error_detected - Called when a PCI error is detected.
12459  * @pdev: PCI device struct
12460  * @state: PCI channel state
12461  *
12462  * Description: Called when a PCI error is detected.
12463  *
12464  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12465  */
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal: MMIO still works, attempt in-place recovery. */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		ioc->pci_error_recovery = 1;
		scsi_block_requests(ioc->shost);
		mpt3sas_base_stop_watchdog(ioc);
		mpt3sas_base_free_resources(ioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		ioc->pci_error_recovery = 1;
		mpt3sas_base_stop_watchdog(ioc);
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* Unknown channel state: be conservative and request a reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
12497 
12498 /**
12499  * scsih_pci_slot_reset - Called when PCI slot has been reset.
12500  * @pdev: PCI device struct
12501  *
12502  * Description: This routine is called by the pci error recovery
12503  * code after the PCI slot has been reset, just before we
12504  * should resume normal operations.
12505  */
12506 static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev * pdev)12507 scsih_pci_slot_reset(struct pci_dev *pdev)
12508 {
12509 	struct Scsi_Host *shost;
12510 	struct MPT3SAS_ADAPTER *ioc;
12511 	int rc;
12512 
12513 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12514 		return PCI_ERS_RESULT_DISCONNECT;
12515 
12516 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
12517 
12518 	ioc->pci_error_recovery = 0;
12519 	ioc->pdev = pdev;
12520 	pci_restore_state(pdev);
12521 	rc = mpt3sas_base_map_resources(ioc);
12522 	if (rc)
12523 		return PCI_ERS_RESULT_DISCONNECT;
12524 
12525 	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12526 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12527 
12528 	ioc_warn(ioc, "hard reset: %s\n",
12529 		 (rc == 0) ? "success" : "failed");
12530 
12531 	if (!rc)
12532 		return PCI_ERS_RESULT_RECOVERED;
12533 	else
12534 		return PCI_ERS_RESULT_DISCONNECT;
12535 }
12536 
12537 /**
12538  * scsih_pci_resume() - resume normal ops after PCI reset
12539  * @pdev: pointer to PCI device
12540  *
12541  * Called when the error recovery driver tells us that its
12542  * OK to resume normal operation. Use completion to allow
12543  * halted scsi ops to resume.
12544  */
12545 static void
scsih_pci_resume(struct pci_dev * pdev)12546 scsih_pci_resume(struct pci_dev *pdev)
12547 {
12548 	struct Scsi_Host *shost;
12549 	struct MPT3SAS_ADAPTER *ioc;
12550 
12551 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12552 		return;
12553 
12554 	ioc_info(ioc, "PCI error: resume callback!!\n");
12555 
12556 	mpt3sas_base_start_watchdog(ioc);
12557 	scsi_unblock_requests(ioc->shost);
12558 }
12559 
12560 /**
12561  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12562  * @pdev: pointer to PCI device
12563  */
12564 static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev * pdev)12565 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12566 {
12567 	struct Scsi_Host *shost;
12568 	struct MPT3SAS_ADAPTER *ioc;
12569 
12570 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12571 		return PCI_ERS_RESULT_DISCONNECT;
12572 
12573 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12574 
12575 	/* TODO - dump whatever for debugging purposes */
12576 
12577 	/* This called only if scsih_pci_error_detected returns
12578 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12579 	 * works, no need to reset slot.
12580 	 */
12581 	return PCI_ERS_RESULT_RECOVERED;
12582 }
12583 
/*
 * mpt3sas_pci_table - PCI device IDs claimed by this driver.
 *
 * The pci device ids are defined in mpi/mpi2_cnfg.h.
 * Entries are grouped by controller generation (code name ~ chip).
 */
static const struct pci_device_id mpt3sas_pci_table[] = {
	/* Spitfire ~ 2004 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Fury ~ 3004 and 3008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Invader ~ 3108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Cutlass ~ 3216 and 3224 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Intruder ~ 3316 and 3324 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Aero SI 0x00E1 Configurable Secure
	 * 0x00E2 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Atlas PCIe Switch Management Port */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Sea SI 0x00E5 Configurable Secure
	 * 0x00E6 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * ATTO Branded ExpressSAS H12xx GT
	 */
	{ MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	{0}     /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12731 
/* PCI error-recovery callbacks wired into the driver below. */
static struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected	= scsih_pci_error_detected,
	.mmio_enabled	= scsih_pci_mmio_enabled,
	.slot_reset	= scsih_pci_slot_reset,
	.resume		= scsih_pci_resume,
};

/* System-sleep PM ops built from scsih_suspend/scsih_resume. */
static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
12740 
/* PCI driver descriptor: binds the device table above to the probe/remove
 * paths and the error/PM handlers.
 */
static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
	.driver.pm	= &scsih_pm_ops,
};
12750 
/**
 * scsih_init - main entry point for this driver.
 *
 * Registers all message-passing callback handlers with the base driver
 * and initializes debugfs.  The registration order fixes the global
 * callback index values used throughout the driver.
 *
 * Return: always 0 (kept int for symmetry with the caller's error path).
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	/* queuecommand callback handler */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	/* target reset (device removal) callback handler */
	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_tr_complete);

	/* volume target reset callback handler */
	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_volume_tr_complete);

	/* sas control (i/o unit control) callback handler */
	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_sas_control_complete);

	mpt3sas_init_debugfs();
	return 0;
}
12801 
/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases every callback handler registered in scsih_init(), drops the
 * raid class templates attached in _mpt3sas_init(), releases the SAS
 * transport template and tears down debugfs.
 */
static void
scsih_exit(void)
{

	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt3sas_base_release_callback_handler(tm_cb_idx);
	mpt3sas_base_release_callback_handler(base_cb_idx);
	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
	mpt3sas_base_release_callback_handler(transport_cb_idx);
	mpt3sas_base_release_callback_handler(scsih_cb_idx);
	mpt3sas_base_release_callback_handler(config_cb_idx);
	mpt3sas_base_release_callback_handler(ctl_cb_idx);

	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);

	/* raid transport support: only the templates that were attached
	 * (see the hbas_to_enumerate checks in _mpt3sas_init()).
	 */
	if (hbas_to_enumerate != 1)
		raid_class_release(mpt3sas_raid_template);
	if (hbas_to_enumerate != 2)
		raid_class_release(mpt2sas_raid_template);
	sas_release_transport(mpt3sas_transport_template);
	mpt3sas_exit_debugfs();
}
12832 
12833 /**
12834  * _mpt3sas_init - main entry point for this driver.
12835  *
12836  * Return: 0 success, anything else error.
12837  */
12838 static int __init
_mpt3sas_init(void)12839 _mpt3sas_init(void)
12840 {
12841 	int error;
12842 
12843 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12844 					MPT3SAS_DRIVER_VERSION);
12845 
12846 	mpt3sas_transport_template =
12847 	    sas_attach_transport(&mpt3sas_transport_functions);
12848 	if (!mpt3sas_transport_template)
12849 		return -ENODEV;
12850 
12851 	/* No need attach mpt3sas raid functions template
12852 	 * if hbas_to_enumarate value is one.
12853 	 */
12854 	if (hbas_to_enumerate != 1) {
12855 		mpt3sas_raid_template =
12856 				raid_class_attach(&mpt3sas_raid_functions);
12857 		if (!mpt3sas_raid_template) {
12858 			sas_release_transport(mpt3sas_transport_template);
12859 			return -ENODEV;
12860 		}
12861 	}
12862 
12863 	/* No need to attach mpt2sas raid functions template
12864 	 * if hbas_to_enumarate value is two
12865 	 */
12866 	if (hbas_to_enumerate != 2) {
12867 		mpt2sas_raid_template =
12868 				raid_class_attach(&mpt2sas_raid_functions);
12869 		if (!mpt2sas_raid_template) {
12870 			sas_release_transport(mpt3sas_transport_template);
12871 			return -ENODEV;
12872 		}
12873 	}
12874 
12875 	error = scsih_init();
12876 	if (error) {
12877 		scsih_exit();
12878 		return error;
12879 	}
12880 
12881 	mpt3sas_ctl_init(hbas_to_enumerate);
12882 
12883 	error = pci_register_driver(&mpt3sas_driver);
12884 	if (error) {
12885 		mpt3sas_ctl_exit(hbas_to_enumerate);
12886 		scsih_exit();
12887 	}
12888 
12889 	return error;
12890 }
12891 
/**
 * _mpt3sas_exit - exit point for this driver (when it is a module).
 *
 * Unregisters the PCI driver first (detaching all bound devices), then
 * tears down the ctl and scsih sub-modules.
 */
static void __exit
_mpt3sas_exit(void)
{
	pr_info("mpt3sas version %s unloading\n",
				MPT3SAS_DRIVER_VERSION);

	pci_unregister_driver(&mpt3sas_driver);

	mpt3sas_ctl_exit(hbas_to_enumerate);

	scsih_exit();
}
12908 
/* Module load/unload entry points. */
module_init(_mpt3sas_init);
module_exit(_mpt3sas_exit);
12911