• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/overflow.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>

#include "mpt3sas_base.h"
60 
/* Virtual channel numbers used to segregate device classes on the host:
 * channel 0 is bare SAS/SATA, RAID volumes and NVMe devices get their own.
 */
#define RAID_CHANNEL 1

#define PCIE_CHANNEL 2

/* forward proto's */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);

static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u8 retry_count, u8 is_pd);
static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device);
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);

/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
/* global ioc lock for list operations */
DEFINE_SPINLOCK(gioc_lock);

MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
MODULE_ALIAS("mpt2sas");

/* local parameters */
/* Callback indices handed back by the base driver when each message class
 * is registered; -1 truncated to u8 (0xFF) means "not registered yet".
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
/* running instance counts, one per HBA generation */
static int mpt2_ids;
static int mpt3_ids;

static u8 tm_tr_cb_idx = -1 ;
static u8 tm_tr_volume_cb_idx = -1 ;
static u8 tm_sas_control_cb_idx = -1;
107 
/* command line options */
/* logging_level itself is registered via module_param_call() next to
 * _scsih_set_debug_level() below, so writes propagate to live adapters.
 */
static u32 logging_level;
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");


static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");


static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0444);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");

/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
module_param(max_lun, ullong, 0444);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

static ushort hbas_to_enumerate;
module_param(hbas_to_enumerate, ushort, 0444);
MODULE_PARM_DESC(hbas_to_enumerate,
		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
		  1 - enumerates only SAS 2.0 generation HBAs\n \
		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");

/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Either bit can be set, or both
 */
static int diag_buffer_enable = -1;
module_param(diag_buffer_enable, int, 0444);
MODULE_PARM_DESC(diag_buffer_enable,
	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
module_param(disable_discovery, int, 0444);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");


/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");

static bool enable_sdev_max_qd;
module_param(enable_sdev_max_qd, bool, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd,
	"Enable sdev max qd as can_queue, def=disabled(0)");

/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;
165 
166 
/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};

/* Driver-internal pseudo-event codes placed in fw_event_work->event;
 * they sit at the top of the u16 range, presumably outside the firmware's
 * MPI2_EVENT_* space — TODO confirm against mpi2_ioc.h.
 */
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 * Lifetime is refcounted: allocate with alloc_fw_event_work() (refcount 1),
 * take/drop references with fw_event_work_get()/fw_event_work_put().
 */
struct fw_event_work {
	struct list_head	list;
	struct work_struct	work;

	struct MPT3SAS_ADAPTER *ioc;
	u16			device_handle;
	u8			VF_ID;
	u8			VP_ID;
	u8			ignore;
	u16			event;
	struct kref		refcount;
	/* flexible array member; sized by the len passed to
	 * alloc_fw_event_work()
	 */
	char			event_data[] __aligned(4);
};
212 
fw_event_work_free(struct kref * r)213 static void fw_event_work_free(struct kref *r)
214 {
215 	kfree(container_of(r, struct fw_event_work, refcount));
216 }
217 
fw_event_work_get(struct fw_event_work * fw_work)218 static void fw_event_work_get(struct fw_event_work *fw_work)
219 {
220 	kref_get(&fw_work->refcount);
221 }
222 
fw_event_work_put(struct fw_event_work * fw_work)223 static void fw_event_work_put(struct fw_event_work *fw_work)
224 {
225 	kref_put(&fw_work->refcount, fw_event_work_free);
226 }
227 
alloc_fw_event_work(int len)228 static struct fw_event_work *alloc_fw_event_work(int len)
229 {
230 	struct fw_event_work *fw_event;
231 
232 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
233 	if (!fw_event)
234 		return NULL;
235 
236 	kref_init(&fw_event->refcount);
237 	return fw_event;
238 }
239 
/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16	handle;
	u8	is_raid;
	enum dma_data_direction dir;
	u32	data_length;
	dma_addr_t data_dma;
	u8	sense[SCSI_SENSE_BUFFERSIZE];
	u32	lun;
	u8	cdb_length;
	u8	cdb[32];
	u8	timeout;
	u8	VF_ID;
	u8	VP_ID;
	u8	valid_reply;
  /* the following bits are only valid when 'valid_reply = 1' */
	u32	sense_length;
	u16	ioc_status;
	u8	scsi_state;
	u8	scsi_status;
	u32	log_info;
	u32	transfer_length;
};
287 
288 /**
289  * _scsih_set_debug_level - global setting of ioc->logging_level.
290  * @val: ?
291  * @kp: ?
292  *
293  * Note: The logging levels are defined in mpt3sas_debug.h.
294  */
295 static int
_scsih_set_debug_level(const char * val,const struct kernel_param * kp)296 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
297 {
298 	int ret = param_set_int(val, kp);
299 	struct MPT3SAS_ADAPTER *ioc;
300 
301 	if (ret)
302 		return ret;
303 
304 	pr_info("setting logging_level(0x%08x)\n", logging_level);
305 	spin_lock(&gioc_lock);
306 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
307 		ioc->logging_level = logging_level;
308 	spin_unlock(&gioc_lock);
309 	return 0;
310 }
311 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
312 	&logging_level, 0644);
313 
314 /**
315  * _scsih_srch_boot_sas_address - search based on sas_address
316  * @sas_address: sas address
317  * @boot_device: boot device object from bios page 2
318  *
319  * Return: 1 when there's a match, 0 means no match.
320  */
321 static inline int
_scsih_srch_boot_sas_address(u64 sas_address,Mpi2BootDeviceSasWwid_t * boot_device)322 _scsih_srch_boot_sas_address(u64 sas_address,
323 	Mpi2BootDeviceSasWwid_t *boot_device)
324 {
325 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
326 }
327 
328 /**
329  * _scsih_srch_boot_device_name - search based on device name
 * @device_name: device name specified in IDENTIFY frame
331  * @boot_device: boot device object from bios page 2
332  *
333  * Return: 1 when there's a match, 0 means no match.
334  */
335 static inline int
_scsih_srch_boot_device_name(u64 device_name,Mpi2BootDeviceDeviceName_t * boot_device)336 _scsih_srch_boot_device_name(u64 device_name,
337 	Mpi2BootDeviceDeviceName_t *boot_device)
338 {
339 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
340 }
341 
342 /**
343  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
344  * @enclosure_logical_id: enclosure logical id
345  * @slot_number: slot number
346  * @boot_device: boot device object from bios page 2
347  *
348  * Return: 1 when there's a match, 0 means no match.
349  */
350 static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id,u16 slot_number,Mpi2BootDeviceEnclosureSlot_t * boot_device)351 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
352 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
353 {
354 	return (enclosure_logical_id == le64_to_cpu(boot_device->
355 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
356 	    SlotNumber)) ? 1 : 0;
357 }
358 
359 /**
360  * _scsih_is_boot_device - search for matching boot device.
361  * @sas_address: sas address
 * @device_name: device name specified in IDENTIFY frame
363  * @enclosure_logical_id: enclosure logical id
364  * @slot: slot number
365  * @form: specifies boot device form
366  * @boot_device: boot device object from bios page 2
367  *
368  * Return: 1 when there's a match, 0 means no match.
369  */
370 static int
_scsih_is_boot_device(u64 sas_address,u64 device_name,u64 enclosure_logical_id,u16 slot,u8 form,Mpi2BiosPage2BootDevice_t * boot_device)371 _scsih_is_boot_device(u64 sas_address, u64 device_name,
372 	u64 enclosure_logical_id, u16 slot, u8 form,
373 	Mpi2BiosPage2BootDevice_t *boot_device)
374 {
375 	int rc = 0;
376 
377 	switch (form) {
378 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
379 		if (!sas_address)
380 			break;
381 		rc = _scsih_srch_boot_sas_address(
382 		    sas_address, &boot_device->SasWwid);
383 		break;
384 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
385 		if (!enclosure_logical_id)
386 			break;
387 		rc = _scsih_srch_boot_encl_slot(
388 		    enclosure_logical_id,
389 		    slot, &boot_device->EnclosureSlot);
390 		break;
391 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
392 		if (!device_name)
393 			break;
394 		rc = _scsih_srch_boot_device_name(
395 		    device_name, &boot_device->DeviceName);
396 		break;
397 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
398 		break;
399 	}
400 
401 	return rc;
402 }
403 
404 /**
405  * _scsih_get_sas_address - set the sas_address for given device handle
406  * @ioc: ?
407  * @handle: device handle
408  * @sas_address: sas address
409  *
410  * Return: 0 success, non-zero when failure
411  */
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	*sas_address = 0;

	/* Fetch SAS Device Page 0 for this handle from the firmware. */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/* For HBA, vSES doesn't return HBA SAS address. Instead return
		 * vSES's sas address.
		 */
		if ((handle <= ioc->sas_hba.num_phys) &&
		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
		   MPI2_SAS_DEVICE_INFO_SEP)))
			*sas_address = ioc->sas_hba.sas_address;
		else
			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* we hit this because the given parent handle doesn't exist */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* else error case */
	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
		handle, ioc_status, __FILE__, __LINE__, __func__);
	return -EIO;
}
452 
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device or pcie_device object
 * @channel: SAS or PCIe channel
 *
 * Determines whether this device should be the first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current.  This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	 /* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	 /* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* Normalize the identifying keys for the comparison helpers.
	 * RAID volumes and NVMe devices only carry a wwid, so their
	 * remaining keys are zeroed (a zero key never matches).
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* primary (requested) boot device entry */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	/* alternate boot device entry */
	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	/* current boot device entry */
	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}
548 
549 static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)550 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
551 		struct MPT3SAS_TARGET *tgt_priv)
552 {
553 	struct _sas_device *ret;
554 
555 	assert_spin_locked(&ioc->sas_device_lock);
556 
557 	ret = tgt_priv->sas_dev;
558 	if (ret)
559 		sas_device_get(ret);
560 
561 	return ret;
562 }
563 
564 static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)565 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
566 		struct MPT3SAS_TARGET *tgt_priv)
567 {
568 	struct _sas_device *ret;
569 	unsigned long flags;
570 
571 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
572 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
573 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
574 
575 	return ret;
576 }
577 
578 static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)579 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
580 	struct MPT3SAS_TARGET *tgt_priv)
581 {
582 	struct _pcie_device *ret;
583 
584 	assert_spin_locked(&ioc->pcie_device_lock);
585 
586 	ret = tgt_priv->pcie_dev;
587 	if (ret)
588 		pcie_device_get(ret);
589 
590 	return ret;
591 }
592 
593 /**
594  * mpt3sas_get_pdev_from_target - pcie device search
595  * @ioc: per adapter object
596  * @tgt_priv: starget private object
597  *
598  * Context: This function will acquire ioc->pcie_device_lock and will release
599  * before returning the pcie_device object.
600  *
601  * This searches for pcie_device from target, then return pcie_device object.
602  */
603 static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)604 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
605 	struct MPT3SAS_TARGET *tgt_priv)
606 {
607 	struct _pcie_device *ret;
608 	unsigned long flags;
609 
610 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
611 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
612 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
613 
614 	return ret;
615 }
616 
617 struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address)618 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
619 					u64 sas_address)
620 {
621 	struct _sas_device *sas_device;
622 
623 	assert_spin_locked(&ioc->sas_device_lock);
624 
625 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
626 		if (sas_device->sas_address == sas_address)
627 			goto found_device;
628 
629 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
630 		if (sas_device->sas_address == sas_address)
631 			goto found_device;
632 
633 	return NULL;
634 
635 found_device:
636 	sas_device_get(sas_device);
637 	return sas_device;
638 }
639 
640 /**
641  * mpt3sas_get_sdev_by_addr - sas device search
642  * @ioc: per adapter object
643  * @sas_address: sas address
 * Context: This function acquires ioc->sas_device_lock itself and releases
 * it before returning the sas_device object.
645  *
646  * This searches for sas_device based on sas_address, then return sas_device
647  * object.
648  */
649 struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address)650 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
651 	u64 sas_address)
652 {
653 	struct _sas_device *sas_device;
654 	unsigned long flags;
655 
656 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
657 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
658 			sas_address);
659 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
660 
661 	return sas_device;
662 }
663 
664 static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)665 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
666 {
667 	struct _sas_device *sas_device;
668 
669 	assert_spin_locked(&ioc->sas_device_lock);
670 
671 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
672 		if (sas_device->handle == handle)
673 			goto found_device;
674 
675 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
676 		if (sas_device->handle == handle)
677 			goto found_device;
678 
679 	return NULL;
680 
681 found_device:
682 	sas_device_get(sas_device);
683 	return sas_device;
684 }
685 
686 /**
687  * mpt3sas_get_sdev_by_handle - sas device search
688  * @ioc: per adapter object
689  * @handle: sas device handle (assigned by firmware)
 * Context: This function acquires ioc->sas_device_lock itself and releases
 * it before returning the sas_device object.
 *
 * This searches for sas_device based on handle, then return sas_device
 * object.
694  */
695 struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)696 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
697 {
698 	struct _sas_device *sas_device;
699 	unsigned long flags;
700 
701 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
702 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
703 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
704 
705 	return sas_device;
706 }
707 
708 /**
709  * _scsih_display_enclosure_chassis_info - display device location info
710  * @ioc: per adapter object
711  * @sas_device: per sas device object
712  * @sdev: scsi device struct
713  * @starget: scsi target struct
714  */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	/* Emit the same three location facts (enclosure id/slot, enclosure
	 * level/connector, chassis slot) through whichever printk sink is
	 * available: sdev takes priority, then starget, else the ioc log.
	 */
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}
765 
766 /**
767  * _scsih_sas_device_remove - remove sas_device from list.
768  * @ioc: per adapter object
769  * @sas_device: the sas_device object
770  * Context: This function will acquire ioc->sas_device_lock.
771  *
772  * If sas_device is on the list, remove it and decrement its reference count.
773  */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		/* drop the reference the list itself held */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
798 
799 /**
800  * _scsih_device_remove_by_handle - removing device object by handle
801  * @ioc: per adapter object
802  * @handle: device handle
803  */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* skip while host reset is in progress */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		list_del_init(&sas_device->list);
		/* drop the reference the list itself held */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		/* drop the lookup reference taken by
		 * __mpt3sas_get_sdev_by_handle()
		 */
		sas_device_put(sas_device);
	}
}
825 
826 /**
827  * mpt3sas_device_remove_by_sas_address - removing device object by sas address
828  * @ioc: per adapter object
829  * @sas_address: device sas_address
830  */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* skip while host reset is in progress */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
	if (sas_device) {
		list_del_init(&sas_device->list);
		/* drop the reference the list itself held */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		/* drop the lookup reference taken by
		 * __mpt3sas_get_sdev_by_addr()
		 */
		sas_device_put(sas_device);
	}
}
853 
854 /**
855  * _scsih_sas_device_add - insert sas_device to the list.
856  * @ioc: per adapter object
857  * @sas_device: the sas_device object
858  * Context: This function will acquire ioc->sas_device_lock.
859  *
860  * Adding new object to the ioc->sas_device_list.
861  */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* the list takes its own reference on the device */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* hidden raid component: don't expose through the transport layer */
	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	/* register with the sas transport layer; on failure undo the add */
	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	     sas_device->sas_address_parent)) {
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When asyn scanning is enabled, its not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
904 
905 /**
906  * _scsih_sas_device_init_add - insert sas_device to the list.
907  * @ioc: per adapter object
908  * @sas_device: the sas_device object
909  * Context: This function will acquire ioc->sas_device_lock.
910  *
911  * Adding new object at driver load time to the ioc->sas_device_init_list.
912  */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Reference held on behalf of sas_device_init_list. */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	/* Third argument 0 = channel (cf. PCIE_CHANNEL in the PCIe variant);
	 * evaluates this device as a potential boot device. */
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
933 
934 
935 static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)936 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
937 {
938 	struct _pcie_device *pcie_device;
939 
940 	assert_spin_locked(&ioc->pcie_device_lock);
941 
942 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
943 		if (pcie_device->wwid == wwid)
944 			goto found_device;
945 
946 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
947 		if (pcie_device->wwid == wwid)
948 			goto found_device;
949 
950 	return NULL;
951 
952 found_device:
953 	pcie_device_get(pcie_device);
954 	return pcie_device;
955 }
956 
957 
958 /**
959  * mpt3sas_get_pdev_by_wwid - pcie device search
960  * @ioc: per adapter object
961  * @wwid: wwid
962  *
963  * Context: This function will acquire ioc->pcie_device_lock and will release
964  * before returning the pcie_device object.
965  *
966  * This searches for pcie_device based on wwid, then return pcie_device object.
967  */
968 static struct _pcie_device *
mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)969 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
970 {
971 	struct _pcie_device *pcie_device;
972 	unsigned long flags;
973 
974 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
975 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
976 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
977 
978 	return pcie_device;
979 }
980 
981 
982 static struct _pcie_device *
__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER * ioc,int id,int channel)983 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
984 	int channel)
985 {
986 	struct _pcie_device *pcie_device;
987 
988 	assert_spin_locked(&ioc->pcie_device_lock);
989 
990 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
991 		if (pcie_device->id == id && pcie_device->channel == channel)
992 			goto found_device;
993 
994 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
995 		if (pcie_device->id == id && pcie_device->channel == channel)
996 			goto found_device;
997 
998 	return NULL;
999 
1000 found_device:
1001 	pcie_device_get(pcie_device);
1002 	return pcie_device;
1003 }
1004 
1005 static struct _pcie_device *
__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1006 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1007 {
1008 	struct _pcie_device *pcie_device;
1009 
1010 	assert_spin_locked(&ioc->pcie_device_lock);
1011 
1012 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1013 		if (pcie_device->handle == handle)
1014 			goto found_device;
1015 
1016 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1017 		if (pcie_device->handle == handle)
1018 			goto found_device;
1019 
1020 	return NULL;
1021 
1022 found_device:
1023 	pcie_device_get(pcie_device);
1024 	return pcie_device;
1025 }
1026 
1027 
1028 /**
1029  * mpt3sas_get_pdev_by_handle - pcie device search
1030  * @ioc: per adapter object
1031  * @handle: Firmware device handle
1032  *
1033  * Context: This function will acquire ioc->pcie_device_lock and will release
1034  * before returning the pcie_device object.
1035  *
1036  * This searches for pcie_device based on handle, then return pcie_device
1037  * object.
1038  */
1039 struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1040 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1041 {
1042 	struct _pcie_device *pcie_device;
1043 	unsigned long flags;
1044 
1045 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1046 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1047 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1048 
1049 	return pcie_device;
1050 }
1051 
1052 /**
1053  * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1054  * @ioc: per adapter object
1055  * Context: This function will acquire ioc->pcie_device_lock
1056  *
1057  * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1058  * which has reported maximum among all available NVMe drives.
1059  * Minimum max_shutdown_latency will be six seconds.
1060  */
1061 static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER * ioc)1062 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1063 {
1064 	struct _pcie_device *pcie_device;
1065 	unsigned long flags;
1066 	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1067 
1068 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1069 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1070 		if (pcie_device->shutdown_latency) {
1071 			if (shutdown_latency < pcie_device->shutdown_latency)
1072 				shutdown_latency =
1073 					pcie_device->shutdown_latency;
1074 		}
1075 	}
1076 	ioc->max_shutdown_latency = shutdown_latency;
1077 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1078 }
1079 
1080 /**
1081  * _scsih_pcie_device_remove - remove pcie_device from list.
1082  * @ioc: per adapter object
1083  * @pcie_device: the pcie_device object
1084  * Context: This function will acquire ioc->pcie_device_lock.
1085  *
1086  * If pcie_device is on the list, remove it and decrement its reference count.
1087  */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* list_del_init() (not list_del()) so a later list_empty() check on
	 * this node is well-defined. */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		/* Drop the reference that the list held. */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1130 
1131 
1132 /**
1133  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1134  * @ioc: per adapter object
1135  * @handle: device handle
1136  */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	/* Skip removal while host reset/recovery is in progress. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* Lookup takes its own reference on success. */
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* Drop the reference that the list held. */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		/* Drop the lookup reference taken above. */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1173 
1174 /**
1175  * _scsih_pcie_device_add - add pcie_device object
1176  * @ioc: per adapter object
1177  * @pcie_device: pcie_device object
1178  *
1179  * This is added to the pcie_device_list link list.
1180  */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Take a reference on behalf of pcie_device_list before linking. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	/* Blocked devices are tracked but not exposed to the SCSI layer. */
	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		/* SCSI layer refused the device: undo the list insertion. */
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}
1223 
/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adding new object at driver load time to the ioc->pcie_device_init_list.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
				struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Reference held on behalf of pcie_device_init_list. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	/* Blocked devices are never candidates for boot-device selection. */
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
1262 /**
1263  * _scsih_raid_device_find_by_id - raid device search
1264  * @ioc: per adapter object
1265  * @id: sas device target id
1266  * @channel: sas device channel
1267  * Context: Calling function should acquire ioc->raid_device_lock
1268  *
1269  * This searches for raid_device based on target id, then return raid_device
1270  * object.
1271  */
1272 static struct _raid_device *
_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1273 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1274 {
1275 	struct _raid_device *raid_device, *r;
1276 
1277 	r = NULL;
1278 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1279 		if (raid_device->id == id && raid_device->channel == channel) {
1280 			r = raid_device;
1281 			goto out;
1282 		}
1283 	}
1284 
1285  out:
1286 	return r;
1287 }
1288 
1289 /**
1290  * mpt3sas_raid_device_find_by_handle - raid device search
1291  * @ioc: per adapter object
1292  * @handle: sas device handle (assigned by firmware)
1293  * Context: Calling function should acquire ioc->raid_device_lock
1294  *
1295  * This searches for raid_device based on handle, then return raid_device
1296  * object.
1297  */
1298 struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1299 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1300 {
1301 	struct _raid_device *raid_device, *r;
1302 
1303 	r = NULL;
1304 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1305 		if (raid_device->handle != handle)
1306 			continue;
1307 		r = raid_device;
1308 		goto out;
1309 	}
1310 
1311  out:
1312 	return r;
1313 }
1314 
1315 /**
1316  * _scsih_raid_device_find_by_wwid - raid device search
1317  * @ioc: per adapter object
 * @wwid: world wide identifier (unique 64-bit id of the RAID volume)
1319  * Context: Calling function should acquire ioc->raid_device_lock
1320  *
1321  * This searches for raid_device based on wwid, then return raid_device
1322  * object.
1323  */
1324 static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1325 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1326 {
1327 	struct _raid_device *raid_device, *r;
1328 
1329 	r = NULL;
1330 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1331 		if (raid_device->wwid != wwid)
1332 			continue;
1333 		r = raid_device;
1334 		goto out;
1335 	}
1336 
1337  out:
1338 	return r;
1339 }
1340 
1341 /**
1342  * _scsih_raid_device_add - add raid_device object
1343  * @ioc: per adapter object
1344  * @raid_device: raid_device object
1345  *
1346  * This is added to the raid_device_list link list.
1347  */
1348 static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1349 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1350 	struct _raid_device *raid_device)
1351 {
1352 	unsigned long flags;
1353 
1354 	dewtprintk(ioc,
1355 		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1356 			    __func__,
1357 			    raid_device->handle, (u64)raid_device->wwid));
1358 
1359 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1360 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
1361 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1362 }
1363 
1364 /**
1365  * _scsih_raid_device_remove - delete raid_device object
1366  * @ioc: per adapter object
1367  * @raid_device: raid_device object
1368  *
1369  */
1370 static void
_scsih_raid_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1371 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1372 	struct _raid_device *raid_device)
1373 {
1374 	unsigned long flags;
1375 
1376 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1377 	list_del(&raid_device->list);
1378 	kfree(raid_device);
1379 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1380 }
1381 
1382 /**
1383  * mpt3sas_scsih_expander_find_by_handle - expander device search
1384  * @ioc: per adapter object
1385  * @handle: expander handle (assigned by firmware)
1386  * Context: Calling function should acquire ioc->sas_device_lock
1387  *
1388  * This searches for expander device based on handle, then returns the
1389  * sas_node object.
1390  */
1391 struct _sas_node *
mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1392 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1393 {
1394 	struct _sas_node *sas_expander, *r;
1395 
1396 	r = NULL;
1397 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1398 		if (sas_expander->handle != handle)
1399 			continue;
1400 		r = sas_expander;
1401 		goto out;
1402 	}
1403  out:
1404 	return r;
1405 }
1406 
1407 /**
 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1409  * @ioc: per adapter object
1410  * @handle: enclosure handle (assigned by firmware)
1411  * Context: Calling function should acquire ioc->sas_device_lock
1412  *
1413  * This searches for enclosure device based on handle, then returns the
1414  * enclosure object.
1415  */
1416 static struct _enclosure_node *
mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1417 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1418 {
1419 	struct _enclosure_node *enclosure_dev, *r;
1420 
1421 	r = NULL;
1422 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1423 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1424 			continue;
1425 		r = enclosure_dev;
1426 		goto out;
1427 	}
1428 out:
1429 	return r;
1430 }
1431 /**
1432  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1433  * @ioc: per adapter object
1434  * @sas_address: sas address
1435  * Context: Calling function should acquire ioc->sas_node_lock.
1436  *
1437  * This searches for expander device based on sas_address, then returns the
1438  * sas_node object.
1439  */
1440 struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address)1441 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1442 	u64 sas_address)
1443 {
1444 	struct _sas_node *sas_expander, *r;
1445 
1446 	r = NULL;
1447 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1448 		if (sas_expander->sas_address != sas_address)
1449 			continue;
1450 		r = sas_expander;
1451 		goto out;
1452 	}
1453  out:
1454 	return r;
1455 }
1456 
1457 /**
1458  * _scsih_expander_node_add - insert expander device to the list.
1459  * @ioc: per adapter object
1460  * @sas_expander: the sas_device object
1461  * Context: This function will acquire ioc->sas_node_lock.
1462  *
1463  * Adding new object to the ioc->sas_expander_list.
1464  */
1465 static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander)1466 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1467 	struct _sas_node *sas_expander)
1468 {
1469 	unsigned long flags;
1470 
1471 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
1472 	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1473 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1474 }
1475 
1476 /**
1477  * _scsih_is_end_device - determines if device is an end device
1478  * @device_info: bitfield providing information about the device.
1479  * Context: none
1480  *
1481  * Return: 1 if end device.
1482  */
1483 static int
_scsih_is_end_device(u32 device_info)1484 _scsih_is_end_device(u32 device_info)
1485 {
1486 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1487 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1488 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1489 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1490 		return 1;
1491 	else
1492 		return 0;
1493 }
1494 
1495 /**
1496  * _scsih_is_nvme_pciescsi_device - determines if
1497  *			device is an pcie nvme/scsi device
1498  * @device_info: bitfield providing information about the device.
1499  * Context: none
1500  *
1501  * Returns 1 if device is pcie device type nvme/scsi.
1502  */
1503 static int
_scsih_is_nvme_pciescsi_device(u32 device_info)1504 _scsih_is_nvme_pciescsi_device(u32 device_info)
1505 {
1506 	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1507 	    == MPI26_PCIE_DEVINFO_NVME) ||
1508 	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1509 	    == MPI26_PCIE_DEVINFO_SCSI))
1510 		return 1;
1511 	else
1512 		return 0;
1513 }
1514 
1515 /**
1516  * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1517  * @ioc: per adapter object
1518  * @id: target id
1519  * @channel: channel
1520  * Context: This function will acquire ioc->scsi_lookup_lock.
1521  *
1522  * This will search for a matching channel:id in the scsi_lookup array,
1523  * returning 1 if found.
1524  */
1525 static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1526 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1527 	int channel)
1528 {
1529 	int smid;
1530 	struct scsi_cmnd *scmd;
1531 
1532 	for (smid = 1;
1533 	     smid <= ioc->shost->can_queue; smid++) {
1534 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1535 		if (!scmd)
1536 			continue;
1537 		if (scmd->device->id == id &&
1538 		    scmd->device->channel == channel)
1539 			return 1;
1540 	}
1541 	return 0;
1542 }
1543 
1544 /**
1545  * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1546  * @ioc: per adapter object
1547  * @id: target id
1548  * @lun: lun number
1549  * @channel: channel
1550  * Context: This function will acquire ioc->scsi_lookup_lock.
1551  *
1552  * This will search for a matching channel:id:lun in the scsi_lookup array,
1553  * returning 1 if found.
1554  */
1555 static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER * ioc,int id,unsigned int lun,int channel)1556 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1557 	unsigned int lun, int channel)
1558 {
1559 	int smid;
1560 	struct scsi_cmnd *scmd;
1561 
1562 	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1563 
1564 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1565 		if (!scmd)
1566 			continue;
1567 		if (scmd->device->id == id &&
1568 		    scmd->device->channel == channel &&
1569 		    scmd->device->lun == lun)
1570 			return 1;
1571 	}
1572 	return 0;
1573 }
1574 
1575 /**
1576  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1577  * @ioc: per adapter object
1578  * @smid: system request message index
1579  *
1580  * Return: the smid stored scmd pointer.
1581  * Then will dereference the stored scmd pointer.
1582  */
1583 struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER * ioc,u16 smid)1584 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1585 {
1586 	struct scsi_cmnd *scmd = NULL;
1587 	struct scsiio_tracker *st;
1588 	Mpi25SCSIIORequest_t *mpi_request;
1589 
1590 	if (smid > 0  &&
1591 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1592 		u32 unique_tag = smid - 1;
1593 
1594 		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1595 
1596 		/*
1597 		 * If SCSI IO request is outstanding at driver level then
1598 		 * DevHandle filed must be non-zero. If DevHandle is zero
1599 		 * then it means that this smid is free at driver level,
1600 		 * so return NULL.
1601 		 */
1602 		if (!mpi_request->DevHandle)
1603 			return scmd;
1604 
1605 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1606 		if (scmd) {
1607 			st = scsi_cmd_priv(scmd);
1608 			if (st->cb_idx == 0xFF || st->smid == 0)
1609 				scmd = NULL;
1610 		}
1611 	}
1612 	return scmd;
1613 }
1614 
1615 /**
1616  * scsih_change_queue_depth - setting device queue depth
1617  * @sdev: scsi device struct
1618  * @qdepth: requested queue depth
1619  *
1620  * Return: queue depth.
1621  */
1622 static int
scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1623 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1624 {
1625 	struct Scsi_Host *shost = sdev->host;
1626 	int max_depth;
1627 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1628 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1629 	struct MPT3SAS_TARGET *sas_target_priv_data;
1630 	struct _sas_device *sas_device;
1631 	unsigned long flags;
1632 
1633 	max_depth = shost->can_queue;
1634 
1635 	/*
1636 	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1637 	 * is disabled.
1638 	 */
1639 	if (ioc->enable_sdev_max_qd)
1640 		goto not_sata;
1641 
1642 	sas_device_priv_data = sdev->hostdata;
1643 	if (!sas_device_priv_data)
1644 		goto not_sata;
1645 	sas_target_priv_data = sas_device_priv_data->sas_target;
1646 	if (!sas_target_priv_data)
1647 		goto not_sata;
1648 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1649 		goto not_sata;
1650 
1651 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1652 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1653 	if (sas_device) {
1654 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1655 			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1656 
1657 		sas_device_put(sas_device);
1658 	}
1659 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1660 
1661  not_sata:
1662 
1663 	if (!sdev->tagged_supported)
1664 		max_depth = 1;
1665 	if (qdepth > max_depth)
1666 		qdepth = max_depth;
1667 	scsi_change_queue_depth(sdev, qdepth);
1668 	sdev_printk(KERN_INFO, sdev,
1669 	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1670 	    sdev->queue_depth, sdev->tagged_supported,
1671 	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1672 	return sdev->queue_depth;
1673 }
1674 
1675 /**
1676  * mpt3sas_scsih_change_queue_depth - setting device queue depth
1677  * @sdev: scsi device struct
1678  * @qdepth: requested queue depth
1679  *
1680  * Returns nothing.
1681  */
1682 void
mpt3sas_scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1683 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1684 {
1685 	struct Scsi_Host *shost = sdev->host;
1686 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1687 
1688 	if (ioc->enable_sdev_max_qd)
1689 		qdepth = shost->can_queue;
1690 
1691 	scsih_change_queue_depth(sdev, qdepth);
1692 }
1693 
1694 /**
1695  * scsih_target_alloc - target add routine
1696  * @starget: scsi target struct
1697  *
1698  * Return: 0 if ok. Any other return is assumed to be an error and
1699  * the device is ignored.
1700  */
1701 static int
scsih_target_alloc(struct scsi_target * starget)1702 scsih_target_alloc(struct scsi_target *starget)
1703 {
1704 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1705 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1706 	struct MPT3SAS_TARGET *sas_target_priv_data;
1707 	struct _sas_device *sas_device;
1708 	struct _raid_device *raid_device;
1709 	struct _pcie_device *pcie_device;
1710 	unsigned long flags;
1711 	struct sas_rphy *rphy;
1712 
1713 	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1714 				       GFP_KERNEL);
1715 	if (!sas_target_priv_data)
1716 		return -ENOMEM;
1717 
1718 	starget->hostdata = sas_target_priv_data;
1719 	sas_target_priv_data->starget = starget;
1720 	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1721 
1722 	/* RAID volumes */
1723 	if (starget->channel == RAID_CHANNEL) {
1724 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1725 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1726 		    starget->channel);
1727 		if (raid_device) {
1728 			sas_target_priv_data->handle = raid_device->handle;
1729 			sas_target_priv_data->sas_address = raid_device->wwid;
1730 			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1731 			if (ioc->is_warpdrive)
1732 				sas_target_priv_data->raid_device = raid_device;
1733 			raid_device->starget = starget;
1734 		}
1735 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1736 		return 0;
1737 	}
1738 
1739 	/* PCIe devices */
1740 	if (starget->channel == PCIE_CHANNEL) {
1741 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1742 		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1743 			starget->channel);
1744 		if (pcie_device) {
1745 			sas_target_priv_data->handle = pcie_device->handle;
1746 			sas_target_priv_data->sas_address = pcie_device->wwid;
1747 			sas_target_priv_data->pcie_dev = pcie_device;
1748 			pcie_device->starget = starget;
1749 			pcie_device->id = starget->id;
1750 			pcie_device->channel = starget->channel;
1751 			sas_target_priv_data->flags |=
1752 				MPT_TARGET_FLAGS_PCIE_DEVICE;
1753 			if (pcie_device->fast_path)
1754 				sas_target_priv_data->flags |=
1755 					MPT_TARGET_FASTPATH_IO;
1756 		}
1757 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1758 		return 0;
1759 	}
1760 
1761 	/* sas/sata devices */
1762 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1763 	rphy = dev_to_rphy(starget->dev.parent);
1764 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1765 	   rphy->identify.sas_address);
1766 
1767 	if (sas_device) {
1768 		sas_target_priv_data->handle = sas_device->handle;
1769 		sas_target_priv_data->sas_address = sas_device->sas_address;
1770 		sas_target_priv_data->sas_dev = sas_device;
1771 		sas_device->starget = starget;
1772 		sas_device->id = starget->id;
1773 		sas_device->channel = starget->channel;
1774 		if (test_bit(sas_device->handle, ioc->pd_handles))
1775 			sas_target_priv_data->flags |=
1776 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
1777 		if (sas_device->fast_path)
1778 			sas_target_priv_data->flags |=
1779 					MPT_TARGET_FASTPATH_IO;
1780 	}
1781 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1782 
1783 	return 0;
1784 }
1785 
1786 /**
1787  * scsih_target_destroy - target destroy routine
1788  * @starget: scsi target struct
1789  */
1790 static void
scsih_target_destroy(struct scsi_target * starget)1791 scsih_target_destroy(struct scsi_target *starget)
1792 {
1793 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1794 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1795 	struct MPT3SAS_TARGET *sas_target_priv_data;
1796 	struct _sas_device *sas_device;
1797 	struct _raid_device *raid_device;
1798 	struct _pcie_device *pcie_device;
1799 	unsigned long flags;
1800 
1801 	sas_target_priv_data = starget->hostdata;
1802 	if (!sas_target_priv_data)
1803 		return;
1804 
1805 	if (starget->channel == RAID_CHANNEL) {
1806 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1807 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1808 		    starget->channel);
1809 		if (raid_device) {
1810 			raid_device->starget = NULL;
1811 			raid_device->sdev = NULL;
1812 		}
1813 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1814 		goto out;
1815 	}
1816 
1817 	if (starget->channel == PCIE_CHANNEL) {
1818 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1819 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1820 							sas_target_priv_data);
1821 		if (pcie_device && (pcie_device->starget == starget) &&
1822 			(pcie_device->id == starget->id) &&
1823 			(pcie_device->channel == starget->channel))
1824 			pcie_device->starget = NULL;
1825 
1826 		if (pcie_device) {
1827 			/*
1828 			 * Corresponding get() is in _scsih_target_alloc()
1829 			 */
1830 			sas_target_priv_data->pcie_dev = NULL;
1831 			pcie_device_put(pcie_device);
1832 			pcie_device_put(pcie_device);
1833 		}
1834 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1835 		goto out;
1836 	}
1837 
1838 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1839 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1840 	if (sas_device && (sas_device->starget == starget) &&
1841 	    (sas_device->id == starget->id) &&
1842 	    (sas_device->channel == starget->channel))
1843 		sas_device->starget = NULL;
1844 
1845 	if (sas_device) {
1846 		/*
1847 		 * Corresponding get() is in _scsih_target_alloc()
1848 		 */
1849 		sas_target_priv_data->sas_dev = NULL;
1850 		sas_device_put(sas_device);
1851 
1852 		sas_device_put(sas_device);
1853 	}
1854 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1855 
1856  out:
1857 	kfree(sas_target_priv_data);
1858 	starget->hostdata = NULL;
1859 }
1860 
1861 /**
1862  * scsih_slave_alloc - device add routine
1863  * @sdev: scsi device struct
1864  *
1865  * Return: 0 if ok. Any other return is assumed to be an error and
1866  * the device is ignored.
1867  */
1868 static int
scsih_slave_alloc(struct scsi_device * sdev)1869 scsih_slave_alloc(struct scsi_device *sdev)
1870 {
1871 	struct Scsi_Host *shost;
1872 	struct MPT3SAS_ADAPTER *ioc;
1873 	struct MPT3SAS_TARGET *sas_target_priv_data;
1874 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1875 	struct scsi_target *starget;
1876 	struct _raid_device *raid_device;
1877 	struct _sas_device *sas_device;
1878 	struct _pcie_device *pcie_device;
1879 	unsigned long flags;
1880 
1881 	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1882 				       GFP_KERNEL);
1883 	if (!sas_device_priv_data)
1884 		return -ENOMEM;
1885 
1886 	sas_device_priv_data->lun = sdev->lun;
1887 	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1888 
1889 	starget = scsi_target(sdev);
1890 	sas_target_priv_data = starget->hostdata;
1891 	sas_target_priv_data->num_luns++;
1892 	sas_device_priv_data->sas_target = sas_target_priv_data;
1893 	sdev->hostdata = sas_device_priv_data;
1894 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1895 		sdev->no_uld_attach = 1;
1896 
1897 	shost = dev_to_shost(&starget->dev);
1898 	ioc = shost_priv(shost);
1899 	if (starget->channel == RAID_CHANNEL) {
1900 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1901 		raid_device = _scsih_raid_device_find_by_id(ioc,
1902 		    starget->id, starget->channel);
1903 		if (raid_device)
1904 			raid_device->sdev = sdev; /* raid is single lun */
1905 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1906 	}
1907 	if (starget->channel == PCIE_CHANNEL) {
1908 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1909 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1910 				sas_target_priv_data->sas_address);
1911 		if (pcie_device && (pcie_device->starget == NULL)) {
1912 			sdev_printk(KERN_INFO, sdev,
1913 			    "%s : pcie_device->starget set to starget @ %d\n",
1914 			    __func__, __LINE__);
1915 			pcie_device->starget = starget;
1916 		}
1917 
1918 		if (pcie_device)
1919 			pcie_device_put(pcie_device);
1920 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1921 
1922 	} else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1923 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1924 		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1925 					sas_target_priv_data->sas_address);
1926 		if (sas_device && (sas_device->starget == NULL)) {
1927 			sdev_printk(KERN_INFO, sdev,
1928 			"%s : sas_device->starget set to starget @ %d\n",
1929 			     __func__, __LINE__);
1930 			sas_device->starget = starget;
1931 		}
1932 
1933 		if (sas_device)
1934 			sas_device_put(sas_device);
1935 
1936 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1937 	}
1938 
1939 	return 0;
1940 }
1941 
1942 /**
1943  * scsih_slave_destroy - device destroy routine
1944  * @sdev: scsi device struct
1945  */
1946 static void
scsih_slave_destroy(struct scsi_device * sdev)1947 scsih_slave_destroy(struct scsi_device *sdev)
1948 {
1949 	struct MPT3SAS_TARGET *sas_target_priv_data;
1950 	struct scsi_target *starget;
1951 	struct Scsi_Host *shost;
1952 	struct MPT3SAS_ADAPTER *ioc;
1953 	struct _sas_device *sas_device;
1954 	struct _pcie_device *pcie_device;
1955 	unsigned long flags;
1956 
1957 	if (!sdev->hostdata)
1958 		return;
1959 
1960 	starget = scsi_target(sdev);
1961 	sas_target_priv_data = starget->hostdata;
1962 	sas_target_priv_data->num_luns--;
1963 
1964 	shost = dev_to_shost(&starget->dev);
1965 	ioc = shost_priv(shost);
1966 
1967 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1968 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1969 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1970 				sas_target_priv_data);
1971 		if (pcie_device && !sas_target_priv_data->num_luns)
1972 			pcie_device->starget = NULL;
1973 
1974 		if (pcie_device)
1975 			pcie_device_put(pcie_device);
1976 
1977 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1978 
1979 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1980 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1981 		sas_device = __mpt3sas_get_sdev_from_target(ioc,
1982 				sas_target_priv_data);
1983 		if (sas_device && !sas_target_priv_data->num_luns)
1984 			sas_device->starget = NULL;
1985 
1986 		if (sas_device)
1987 			sas_device_put(sas_device);
1988 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1989 	}
1990 
1991 	kfree(sdev->hostdata);
1992 	sdev->hostdata = NULL;
1993 }
1994 
1995 /**
1996  * _scsih_display_sata_capabilities - sata capabilities
1997  * @ioc: per adapter object
1998  * @handle: device handle
1999  * @sdev: scsi device struct
2000  */
2001 static void
_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER * ioc,u16 handle,struct scsi_device * sdev)2002 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2003 	u16 handle, struct scsi_device *sdev)
2004 {
2005 	Mpi2ConfigReply_t mpi_reply;
2006 	Mpi2SasDevicePage0_t sas_device_pg0;
2007 	u32 ioc_status;
2008 	u16 flags;
2009 	u32 device_info;
2010 
2011 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2012 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2013 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2014 			__FILE__, __LINE__, __func__);
2015 		return;
2016 	}
2017 
2018 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2019 	    MPI2_IOCSTATUS_MASK;
2020 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2021 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2022 			__FILE__, __LINE__, __func__);
2023 		return;
2024 	}
2025 
2026 	flags = le16_to_cpu(sas_device_pg0.Flags);
2027 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2028 
2029 	sdev_printk(KERN_INFO, sdev,
2030 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2031 	    "sw_preserve(%s)\n",
2032 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2033 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2034 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2035 	    "n",
2036 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2037 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2038 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2039 }
2040 
2041 /*
2042  * raid transport support -
2043  * Enabled for SLES11 and newer, in older kernels the driver will panic when
2044  * unloading the driver followed by a load - I believe that the subroutine
2045  * raid_class_release() is not cleaning up properly.
2046  */
2047 
2048 /**
2049  * scsih_is_raid - return boolean indicating device is raid volume
2050  * @dev: the device struct object
2051  */
2052 static int
scsih_is_raid(struct device * dev)2053 scsih_is_raid(struct device *dev)
2054 {
2055 	struct scsi_device *sdev = to_scsi_device(dev);
2056 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2057 
2058 	if (ioc->is_warpdrive)
2059 		return 0;
2060 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2061 }
2062 
2063 static int
scsih_is_nvme(struct device * dev)2064 scsih_is_nvme(struct device *dev)
2065 {
2066 	struct scsi_device *sdev = to_scsi_device(dev);
2067 
2068 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2069 }
2070 
2071 /**
2072  * scsih_get_resync - get raid volume resync percent complete
2073  * @dev: the device struct object
2074  */
2075 static void
scsih_get_resync(struct device * dev)2076 scsih_get_resync(struct device *dev)
2077 {
2078 	struct scsi_device *sdev = to_scsi_device(dev);
2079 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2080 	static struct _raid_device *raid_device;
2081 	unsigned long flags;
2082 	Mpi2RaidVolPage0_t vol_pg0;
2083 	Mpi2ConfigReply_t mpi_reply;
2084 	u32 volume_status_flags;
2085 	u8 percent_complete;
2086 	u16 handle;
2087 
2088 	percent_complete = 0;
2089 	handle = 0;
2090 	if (ioc->is_warpdrive)
2091 		goto out;
2092 
2093 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2094 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2095 	    sdev->channel);
2096 	if (raid_device) {
2097 		handle = raid_device->handle;
2098 		percent_complete = raid_device->percent_complete;
2099 	}
2100 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2101 
2102 	if (!handle)
2103 		goto out;
2104 
2105 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2106 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2107 	     sizeof(Mpi2RaidVolPage0_t))) {
2108 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2109 			__FILE__, __LINE__, __func__);
2110 		percent_complete = 0;
2111 		goto out;
2112 	}
2113 
2114 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2115 	if (!(volume_status_flags &
2116 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2117 		percent_complete = 0;
2118 
2119  out:
2120 
2121 	switch (ioc->hba_mpi_version_belonged) {
2122 	case MPI2_VERSION:
2123 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2124 		break;
2125 	case MPI25_VERSION:
2126 	case MPI26_VERSION:
2127 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2128 		break;
2129 	}
2130 }
2131 
2132 /**
2133  * scsih_get_state - get raid volume level
2134  * @dev: the device struct object
2135  */
2136 static void
scsih_get_state(struct device * dev)2137 scsih_get_state(struct device *dev)
2138 {
2139 	struct scsi_device *sdev = to_scsi_device(dev);
2140 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2141 	static struct _raid_device *raid_device;
2142 	unsigned long flags;
2143 	Mpi2RaidVolPage0_t vol_pg0;
2144 	Mpi2ConfigReply_t mpi_reply;
2145 	u32 volstate;
2146 	enum raid_state state = RAID_STATE_UNKNOWN;
2147 	u16 handle = 0;
2148 
2149 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2150 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2151 	    sdev->channel);
2152 	if (raid_device)
2153 		handle = raid_device->handle;
2154 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2155 
2156 	if (!raid_device)
2157 		goto out;
2158 
2159 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2160 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2161 	     sizeof(Mpi2RaidVolPage0_t))) {
2162 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2163 			__FILE__, __LINE__, __func__);
2164 		goto out;
2165 	}
2166 
2167 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2168 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2169 		state = RAID_STATE_RESYNCING;
2170 		goto out;
2171 	}
2172 
2173 	switch (vol_pg0.VolumeState) {
2174 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2175 	case MPI2_RAID_VOL_STATE_ONLINE:
2176 		state = RAID_STATE_ACTIVE;
2177 		break;
2178 	case  MPI2_RAID_VOL_STATE_DEGRADED:
2179 		state = RAID_STATE_DEGRADED;
2180 		break;
2181 	case MPI2_RAID_VOL_STATE_FAILED:
2182 	case MPI2_RAID_VOL_STATE_MISSING:
2183 		state = RAID_STATE_OFFLINE;
2184 		break;
2185 	}
2186  out:
2187 	switch (ioc->hba_mpi_version_belonged) {
2188 	case MPI2_VERSION:
2189 		raid_set_state(mpt2sas_raid_template, dev, state);
2190 		break;
2191 	case MPI25_VERSION:
2192 	case MPI26_VERSION:
2193 		raid_set_state(mpt3sas_raid_template, dev, state);
2194 		break;
2195 	}
2196 }
2197 
2198 /**
2199  * _scsih_set_level - set raid level
2200  * @ioc: ?
2201  * @sdev: scsi device struct
2202  * @volume_type: volume type
2203  */
2204 static void
_scsih_set_level(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev,u8 volume_type)2205 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2206 	struct scsi_device *sdev, u8 volume_type)
2207 {
2208 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2209 
2210 	switch (volume_type) {
2211 	case MPI2_RAID_VOL_TYPE_RAID0:
2212 		level = RAID_LEVEL_0;
2213 		break;
2214 	case MPI2_RAID_VOL_TYPE_RAID10:
2215 		level = RAID_LEVEL_10;
2216 		break;
2217 	case MPI2_RAID_VOL_TYPE_RAID1E:
2218 		level = RAID_LEVEL_1E;
2219 		break;
2220 	case MPI2_RAID_VOL_TYPE_RAID1:
2221 		level = RAID_LEVEL_1;
2222 		break;
2223 	}
2224 
2225 	switch (ioc->hba_mpi_version_belonged) {
2226 	case MPI2_VERSION:
2227 		raid_set_level(mpt2sas_raid_template,
2228 			&sdev->sdev_gendev, level);
2229 		break;
2230 	case MPI25_VERSION:
2231 	case MPI26_VERSION:
2232 		raid_set_level(mpt3sas_raid_template,
2233 			&sdev->sdev_gendev, level);
2234 		break;
2235 	}
2236 }
2237 
2238 
2239 /**
2240  * _scsih_get_volume_capabilities - volume capabilities
2241  * @ioc: per adapter object
2242  * @raid_device: the raid_device object
2243  *
2244  * Return: 0 for success, else 1
2245  */
2246 static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)2247 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2248 	struct _raid_device *raid_device)
2249 {
2250 	Mpi2RaidVolPage0_t *vol_pg0;
2251 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2252 	Mpi2SasDevicePage0_t sas_device_pg0;
2253 	Mpi2ConfigReply_t mpi_reply;
2254 	u16 sz;
2255 	u8 num_pds;
2256 
2257 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2258 	    &num_pds)) || !num_pds) {
2259 		dfailprintk(ioc,
2260 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2261 				     __FILE__, __LINE__, __func__));
2262 		return 1;
2263 	}
2264 
2265 	raid_device->num_pds = num_pds;
2266 	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2267 	    sizeof(Mpi2RaidVol0PhysDisk_t));
2268 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2269 	if (!vol_pg0) {
2270 		dfailprintk(ioc,
2271 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2272 				     __FILE__, __LINE__, __func__));
2273 		return 1;
2274 	}
2275 
2276 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2277 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2278 		dfailprintk(ioc,
2279 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2280 				     __FILE__, __LINE__, __func__));
2281 		kfree(vol_pg0);
2282 		return 1;
2283 	}
2284 
2285 	raid_device->volume_type = vol_pg0->VolumeType;
2286 
2287 	/* figure out what the underlying devices are by
2288 	 * obtaining the device_info bits for the 1st device
2289 	 */
2290 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2291 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2292 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2293 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2294 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2295 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2296 			raid_device->device_info =
2297 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2298 		}
2299 	}
2300 
2301 	kfree(vol_pg0);
2302 	return 0;
2303 }
2304 
2305 /**
2306  * _scsih_enable_tlr - setting TLR flags
2307  * @ioc: per adapter object
2308  * @sdev: scsi device struct
2309  *
2310  * Enabling Transaction Layer Retries for tape devices when
2311  * vpd page 0x90 is present
2312  *
2313  */
2314 static void
_scsih_enable_tlr(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev)2315 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2316 {
2317 
2318 	/* only for TAPE */
2319 	if (sdev->type != TYPE_TAPE)
2320 		return;
2321 
2322 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2323 		return;
2324 
2325 	sas_enable_tlr(sdev);
2326 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2327 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2328 	return;
2329 
2330 }
2331 
2332 /**
2333  * scsih_slave_configure - device configure routine.
2334  * @sdev: scsi device struct
2335  *
2336  * Return: 0 if ok. Any other return is assumed to be an error and
2337  * the device is ignored.
2338  */
2339 static int
scsih_slave_configure(struct scsi_device * sdev)2340 scsih_slave_configure(struct scsi_device *sdev)
2341 {
2342 	struct Scsi_Host *shost = sdev->host;
2343 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2344 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2345 	struct MPT3SAS_TARGET *sas_target_priv_data;
2346 	struct _sas_device *sas_device;
2347 	struct _pcie_device *pcie_device;
2348 	struct _raid_device *raid_device;
2349 	unsigned long flags;
2350 	int qdepth;
2351 	u8 ssp_target = 0;
2352 	char *ds = "";
2353 	char *r_level = "";
2354 	u16 handle, volume_handle = 0;
2355 	u64 volume_wwid = 0;
2356 
2357 	qdepth = 1;
2358 	sas_device_priv_data = sdev->hostdata;
2359 	sas_device_priv_data->configured_lun = 1;
2360 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2361 	sas_target_priv_data = sas_device_priv_data->sas_target;
2362 	handle = sas_target_priv_data->handle;
2363 
2364 	/* raid volume handling */
2365 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2366 
2367 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2368 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2369 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2370 		if (!raid_device) {
2371 			dfailprintk(ioc,
2372 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2373 					     __FILE__, __LINE__, __func__));
2374 			return 1;
2375 		}
2376 
2377 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2378 			dfailprintk(ioc,
2379 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2380 					     __FILE__, __LINE__, __func__));
2381 			return 1;
2382 		}
2383 
2384 		/*
2385 		 * WARPDRIVE: Initialize the required data for Direct IO
2386 		 */
2387 		mpt3sas_init_warpdrive_properties(ioc, raid_device);
2388 
2389 		/* RAID Queue Depth Support
2390 		 * IS volume = underlying qdepth of drive type, either
2391 		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2392 		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2393 		 */
2394 		if (raid_device->device_info &
2395 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2396 			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2397 			ds = "SSP";
2398 		} else {
2399 			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2400 			if (raid_device->device_info &
2401 			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2402 				ds = "SATA";
2403 			else
2404 				ds = "STP";
2405 		}
2406 
2407 		switch (raid_device->volume_type) {
2408 		case MPI2_RAID_VOL_TYPE_RAID0:
2409 			r_level = "RAID0";
2410 			break;
2411 		case MPI2_RAID_VOL_TYPE_RAID1E:
2412 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2413 			if (ioc->manu_pg10.OEMIdentifier &&
2414 			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2415 			    MFG10_GF0_R10_DISPLAY) &&
2416 			    !(raid_device->num_pds % 2))
2417 				r_level = "RAID10";
2418 			else
2419 				r_level = "RAID1E";
2420 			break;
2421 		case MPI2_RAID_VOL_TYPE_RAID1:
2422 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2423 			r_level = "RAID1";
2424 			break;
2425 		case MPI2_RAID_VOL_TYPE_RAID10:
2426 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2427 			r_level = "RAID10";
2428 			break;
2429 		case MPI2_RAID_VOL_TYPE_UNKNOWN:
2430 		default:
2431 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2432 			r_level = "RAIDX";
2433 			break;
2434 		}
2435 
2436 		if (!ioc->hide_ir_msg)
2437 			sdev_printk(KERN_INFO, sdev,
2438 			   "%s: handle(0x%04x), wwid(0x%016llx),"
2439 			    " pd_count(%d), type(%s)\n",
2440 			    r_level, raid_device->handle,
2441 			    (unsigned long long)raid_device->wwid,
2442 			    raid_device->num_pds, ds);
2443 
2444 		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2445 			blk_queue_max_hw_sectors(sdev->request_queue,
2446 						MPT3SAS_RAID_MAX_SECTORS);
2447 			sdev_printk(KERN_INFO, sdev,
2448 					"Set queue's max_sector to: %u\n",
2449 						MPT3SAS_RAID_MAX_SECTORS);
2450 		}
2451 
2452 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2453 
2454 		/* raid transport support */
2455 		if (!ioc->is_warpdrive)
2456 			_scsih_set_level(ioc, sdev, raid_device->volume_type);
2457 		return 0;
2458 	}
2459 
2460 	/* non-raid handling */
2461 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2462 		if (mpt3sas_config_get_volume_handle(ioc, handle,
2463 		    &volume_handle)) {
2464 			dfailprintk(ioc,
2465 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2466 					     __FILE__, __LINE__, __func__));
2467 			return 1;
2468 		}
2469 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2470 		    volume_handle, &volume_wwid)) {
2471 			dfailprintk(ioc,
2472 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2473 					     __FILE__, __LINE__, __func__));
2474 			return 1;
2475 		}
2476 	}
2477 
2478 	/* PCIe handling */
2479 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2480 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2481 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2482 				sas_device_priv_data->sas_target->sas_address);
2483 		if (!pcie_device) {
2484 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2485 			dfailprintk(ioc,
2486 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2487 					     __FILE__, __LINE__, __func__));
2488 			return 1;
2489 		}
2490 
2491 		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2492 		ds = "NVMe";
2493 		sdev_printk(KERN_INFO, sdev,
2494 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2495 			ds, handle, (unsigned long long)pcie_device->wwid,
2496 			pcie_device->port_num);
2497 		if (pcie_device->enclosure_handle != 0)
2498 			sdev_printk(KERN_INFO, sdev,
2499 			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
2500 			ds,
2501 			(unsigned long long)pcie_device->enclosure_logical_id,
2502 			pcie_device->slot);
2503 		if (pcie_device->connector_name[0] != '\0')
2504 			sdev_printk(KERN_INFO, sdev,
2505 				"%s: enclosure level(0x%04x),"
2506 				"connector name( %s)\n", ds,
2507 				pcie_device->enclosure_level,
2508 				pcie_device->connector_name);
2509 
2510 		if (pcie_device->nvme_mdts)
2511 			blk_queue_max_hw_sectors(sdev->request_queue,
2512 					pcie_device->nvme_mdts/512);
2513 
2514 		pcie_device_put(pcie_device);
2515 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2516 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2517 		/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2518 		 ** merged and can eliminate holes created during merging
2519 		 ** operation.
2520 		 **/
2521 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2522 				sdev->request_queue);
2523 		blk_queue_virt_boundary(sdev->request_queue,
2524 				ioc->page_size - 1);
2525 		return 0;
2526 	}
2527 
2528 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2529 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2530 	   sas_device_priv_data->sas_target->sas_address);
2531 	if (!sas_device) {
2532 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2533 		dfailprintk(ioc,
2534 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2535 				     __FILE__, __LINE__, __func__));
2536 		return 1;
2537 	}
2538 
2539 	sas_device->volume_handle = volume_handle;
2540 	sas_device->volume_wwid = volume_wwid;
2541 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2542 		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2543 		ssp_target = 1;
2544 		if (sas_device->device_info &
2545 				MPI2_SAS_DEVICE_INFO_SEP) {
2546 			sdev_printk(KERN_WARNING, sdev,
2547 			"set ignore_delay_remove for handle(0x%04x)\n",
2548 			sas_device_priv_data->sas_target->handle);
2549 			sas_device_priv_data->ignore_delay_remove = 1;
2550 			ds = "SES";
2551 		} else
2552 			ds = "SSP";
2553 	} else {
2554 		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2555 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2556 			ds = "STP";
2557 		else if (sas_device->device_info &
2558 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2559 			ds = "SATA";
2560 	}
2561 
2562 	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2563 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2564 	    ds, handle, (unsigned long long)sas_device->sas_address,
2565 	    sas_device->phy, (unsigned long long)sas_device->device_name);
2566 
2567 	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2568 
2569 	sas_device_put(sas_device);
2570 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2571 
2572 	if (!ssp_target)
2573 		_scsih_display_sata_capabilities(ioc, handle, sdev);
2574 
2575 
2576 	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2577 
2578 	if (ssp_target) {
2579 		sas_read_port_mode_page(sdev);
2580 		_scsih_enable_tlr(ioc, sdev);
2581 	}
2582 
2583 	return 0;
2584 }
2585 
2586 /**
2587  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2588  * @sdev: scsi device struct
2589  * @bdev: pointer to block device context
2590  * @capacity: device size (in 512 byte sectors)
2591  * @params: three element array to place output:
2592  *              params[0] number of heads (max 255)
2593  *              params[1] number of sectors (max 63)
2594  *              params[2] number of cylinders
2595  */
2596 static int
scsih_bios_param(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int params[])2597 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2598 	sector_t capacity, int params[])
2599 {
2600 	int		heads;
2601 	int		sectors;
2602 	sector_t	cylinders;
2603 	ulong		dummy;
2604 
2605 	heads = 64;
2606 	sectors = 32;
2607 
2608 	dummy = heads * sectors;
2609 	cylinders = capacity;
2610 	sector_div(cylinders, dummy);
2611 
2612 	/*
2613 	 * Handle extended translation size for logical drives
2614 	 * > 1Gb
2615 	 */
2616 	if ((ulong)capacity >= 0x200000) {
2617 		heads = 255;
2618 		sectors = 63;
2619 		dummy = heads * sectors;
2620 		cylinders = capacity;
2621 		sector_div(cylinders, dummy);
2622 	}
2623 
2624 	/* return result */
2625 	params[0] = heads;
2626 	params[1] = sectors;
2627 	params[2] = cylinders;
2628 
2629 	return 0;
2630 }
2631 
2632 /**
2633  * _scsih_response_code - translation of device response code
2634  * @ioc: per adapter object
2635  * @response_code: response code returned by the device
2636  */
2637 static void
_scsih_response_code(struct MPT3SAS_ADAPTER * ioc,u8 response_code)2638 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2639 {
2640 	char *desc;
2641 
2642 	switch (response_code) {
2643 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2644 		desc = "task management request completed";
2645 		break;
2646 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2647 		desc = "invalid frame";
2648 		break;
2649 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2650 		desc = "task management request not supported";
2651 		break;
2652 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2653 		desc = "task management request failed";
2654 		break;
2655 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2656 		desc = "task management request succeeded";
2657 		break;
2658 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2659 		desc = "invalid lun";
2660 		break;
2661 	case 0xA:
2662 		desc = "overlapped tag attempted";
2663 		break;
2664 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2665 		desc = "task queued, however not sent to target";
2666 		break;
2667 	default:
2668 		desc = "unknown";
2669 		break;
2670 	}
2671 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2672 }
2673 
2674 /**
2675  * _scsih_tm_done - tm completion routine
2676  * @ioc: per adapter object
2677  * @smid: system request message index
2678  * @msix_index: MSIX table index supplied by the OS
2679  * @reply: reply message frame(lower 32bit addr)
2680  * Context: none.
2681  *
2682  * The callback handler when using scsih_issue_tm.
2683  *
2684  * Return: 1 meaning mf should be freed from _base_interrupt
2685  *         0 means the mf is freed from this function.
2686  */
2687 static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)2688 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2689 {
2690 	MPI2DefaultReply_t *mpi_reply;
2691 
2692 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2693 		return 1;
2694 	if (ioc->tm_cmds.smid != smid)
2695 		return 1;
2696 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2697 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2698 	if (mpi_reply) {
2699 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2700 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2701 	}
2702 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2703 	complete(&ioc->tm_cmds.done);
2704 	return 1;
2705 }
2706 
2707 /**
2708  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2709  * @ioc: per adapter object
2710  * @handle: device handle
2711  *
2712  * During taskmangement request, we need to freeze the device queue.
2713  */
2714 void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)2715 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2716 {
2717 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2718 	struct scsi_device *sdev;
2719 	u8 skip = 0;
2720 
2721 	shost_for_each_device(sdev, ioc->shost) {
2722 		if (skip)
2723 			continue;
2724 		sas_device_priv_data = sdev->hostdata;
2725 		if (!sas_device_priv_data)
2726 			continue;
2727 		if (sas_device_priv_data->sas_target->handle == handle) {
2728 			sas_device_priv_data->sas_target->tm_busy = 1;
2729 			skip = 1;
2730 			ioc->ignore_loginfos = 1;
2731 		}
2732 	}
2733 }
2734 
2735 /**
2736  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2737  * @ioc: per adapter object
2738  * @handle: device handle
2739  *
2740  * During taskmangement request, we need to freeze the device queue.
2741  */
2742 void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)2743 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2744 {
2745 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2746 	struct scsi_device *sdev;
2747 	u8 skip = 0;
2748 
2749 	shost_for_each_device(sdev, ioc->shost) {
2750 		if (skip)
2751 			continue;
2752 		sas_device_priv_data = sdev->hostdata;
2753 		if (!sas_device_priv_data)
2754 			continue;
2755 		if (sas_device_priv_data->sas_target->handle == handle) {
2756 			sas_device_priv_data->sas_target->tm_busy = 0;
2757 			skip = 1;
2758 			ioc->ignore_loginfos = 0;
2759 		}
2760 	}
2761 }
2762 
2763 /**
2764  * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2765  * @ioc - per adapter object
2766  * @channel - the channel assigned by the OS
2767  * @id: the id assigned by the OS
2768  * @lun: lun number
2769  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2770  * @smid_task: smid assigned to the task
2771  *
2772  * Look whether TM has aborted the timed out SCSI command, if
2773  * TM has aborted the IO then return SUCCESS else return FAILED.
2774  */
2775 static int
scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER * ioc,uint channel,uint id,uint lun,u8 type,u16 smid_task)2776 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2777 	uint id, uint lun, u8 type, u16 smid_task)
2778 {
2779 
2780 	if (smid_task <= ioc->shost->can_queue) {
2781 		switch (type) {
2782 		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2783 			if (!(_scsih_scsi_lookup_find_by_target(ioc,
2784 			    id, channel)))
2785 				return SUCCESS;
2786 			break;
2787 		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2788 		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2789 			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2790 			    lun, channel)))
2791 				return SUCCESS;
2792 			break;
2793 		default:
2794 			return SUCCESS;
2795 		}
2796 	} else if (smid_task == ioc->scsih_cmds.smid) {
2797 		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2798 		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2799 			return SUCCESS;
2800 	} else if (smid_task == ioc->ctl_cmds.smid) {
2801 		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2802 		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2803 			return SUCCESS;
2804 	}
2805 
2806 	return FAILED;
2807 }
2808 
2809 /**
2810  * scsih_tm_post_processing - post processing of target & LUN reset
2811  * @ioc - per adapter object
2812  * @handle: device handle
2813  * @channel - the channel assigned by the OS
2814  * @id: the id assigned by the OS
2815  * @lun: lun number
2816  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2817  * @smid_task: smid assigned to the task
2818  *
2819  * Post processing of target & LUN reset. Due to interrupt latency
2820  * issue it possible that interrupt for aborted IO might not be
2821  * received yet. So before returning failure status, poll the
2822  * reply descriptor pools for the reply of timed out SCSI command.
2823  * Return FAILED status if reply for timed out is not received
2824  * otherwise return SUCCESS.
2825  */
2826 static int
scsih_tm_post_processing(struct MPT3SAS_ADAPTER * ioc,u16 handle,uint channel,uint id,uint lun,u8 type,u16 smid_task)2827 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2828 	uint channel, uint id, uint lun, u8 type, u16 smid_task)
2829 {
2830 	int rc;
2831 
2832 	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
2833 	if (rc == SUCCESS)
2834 		return rc;
2835 
2836 	ioc_info(ioc,
2837 	    "Poll ReplyDescriptor queues for completion of"
2838 	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
2839 	    smid_task, type, handle);
2840 
2841 	/*
2842 	 * Due to interrupt latency issues, driver may receive interrupt for
2843 	 * TM first and then for aborted SCSI IO command. So, poll all the
2844 	 * ReplyDescriptor pools before returning the FAILED status to SML.
2845 	 */
2846 	mpt3sas_base_mask_interrupts(ioc);
2847 	mpt3sas_base_sync_reply_irqs(ioc, 1);
2848 	mpt3sas_base_unmask_interrupts(ioc);
2849 
2850 	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
2851 }
2852 
/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 * Context: user (caller must hold ioc->tm_cmds.mutex)
 *
 * A generic API for sending task management requests to firmware.
 *
 * The callback index is set inside `ioc->tm_cb_idx`.
 * The caller is responsible to check for outstanding commands.
 *
 * Return: SUCCESS or FAILED.
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	lockdep_assert_held(&ioc->tm_cmds.mutex);

	/* only one TM may be outstanding at a time */
	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	/* don't race an in-flight host reset or device removal */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/*
	 * If the IOC is unusable (doorbell busy, fault, or coredump state)
	 * a hard reset is the only recovery; its outcome decides the
	 * SUCCESS/FAILED status returned to the caller.
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM frames are sent from the high-priority smid pool */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	/* build the TM request frame in place */
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* tr_method rides in MsgFlags only for task-level TM types */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	/* freeze the target queue while the TM is in flight */
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		/* NOTE(review): mpt3sas_check_cmd_timeout() appears to be a
		 * macro that can set issue_reset — confirm in mpt3sas_base.h */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
					FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	/* derive SUCCESS/FAILED from the per-type completion evidence */
	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If the DevHandle field in smid_task's entry of the request
		 * pool doesn't match the device handle on which this task
		 * abort TM was issued, then the TM has successfully aborted
		 * the timed out command: smid_task's entry in the request
		 * pool is memset to zero once the timed out command is
		 * returned to the SML. If the command was not aborted then
		 * smid_task's entry is not cleared, it still holds the same
		 * DevHandle, and the driver returns FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	/* always thaw the queue and release the tm_cmds slot */
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
3020 
/**
 * mpt3sas_scsih_issue_locked_tm - serialized wrapper for TM requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 *
 * Takes ioc->tm_cmds.mutex around mpt3sas_scsih_issue_tm().
 *
 * Return: SUCCESS or FAILED.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
		uint channel, uint id, u64 lun, u8 type, u16 smid_task,
		u16 msix_task, u8 timeout, u8 tr_method)
{
	int rc;

	mutex_lock(&ioc->tm_cmds.mutex);
	rc = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
			smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);
	return rc;
}
3034 
/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers.  Prints the failing
 * command plus identifying details (handle, wwid/sas_address, enclosure)
 * of the device it was issued to, choosing the format by target type.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* WarpDrive adapters label volumes differently in log output */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		/* RAID volume target */
		starget_printk(KERN_INFO, starget,
			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
			device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* NVMe/PCIe attached device: lookup under pcie_device_lock */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
				pcie_device->handle,
				(unsigned long long)pcie_device->wwid,
				pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
					"enclosure logical id(0x%016llx), slot(%d)\n",
					(unsigned long long)
					pcie_device->enclosure_logical_id,
					pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
					"enclosure level(0x%04x), connector name( %s)\n",
					pcie_device->enclosure_level,
					pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		/* plain SAS/SATA device (possibly a hidden RAID component) */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				   (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}
3116 
/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Issues a MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK TM for the timed out
 * command, after ruling out cases where abort is impossible or moot.
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scmd->request->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	/* device already gone: complete the command as DID_NO_CONNECT */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	/* NVMe devices (non-SCSI PCIe) use a firmware-specified timeout */
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3190 
/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Issues a LOGICAL_UNIT_RESET TM for the LUN behind @scmd, resolving a
 * hidden RAID component to its volume handle first.
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16	handle;
	u8	tr_method = 0;
	u8	tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* device already gone: complete the command as DID_NO_CONNECT */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	/* NVMe devices (non-SCSI PCIe) need their own timeout and method */
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
		tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}
3271 
/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Issues a TARGET_RESET TM for the target behind @scmd, resolving a
 * hidden RAID component to its volume handle first.  Mirrors
 * scsih_dev_reset() except that the TM is target-scoped (lun 0).
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16	handle;
	u8	tr_method = 0;
	u8	tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	/* target already gone: complete the command as DID_NO_CONNECT */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	/* NVMe devices (non-SCSI PCIe) need their own timeout and method */
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, 0,
		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3349 
3350 
3351 /**
3352  * scsih_host_reset - eh threads main host reset routine
3353  * @scmd: pointer to scsi command object
3354  *
3355  * Return: SUCCESS if command aborted else FAILED
3356  */
3357 static int
scsih_host_reset(struct scsi_cmnd * scmd)3358 scsih_host_reset(struct scsi_cmnd *scmd)
3359 {
3360 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3361 	int r, retval;
3362 
3363 	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3364 	scsi_print_command(scmd);
3365 
3366 	if (ioc->is_driver_loading || ioc->remove_host) {
3367 		ioc_info(ioc, "Blocking the host reset\n");
3368 		r = FAILED;
3369 		goto out;
3370 	}
3371 
3372 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3373 	r = (retval < 0) ? FAILED : SUCCESS;
3374 out:
3375 	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3376 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3377 
3378 	return r;
3379 }
3380 
/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.  Two references are taken: one owned by
 * ioc->fw_event_list and one owned by the queued work item; each is
 * released by the corresponding consumer.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	/* no worker thread (e.g. during teardown): silently drop the event */
	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* reference held by fw_event_list */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	/* reference held by the queued work item */
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
3407 
3408 /**
3409  * _scsih_fw_event_del_from_list - delete fw_event from the list
3410  * @ioc: per adapter object
3411  * @fw_event: object describing the event
3412  * Context: This function will acquire ioc->fw_event_lock.
3413  *
3414  * If the fw_event is on the fw_event_list, remove it and do a put.
3415  */
3416 static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)3417 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3418 	*fw_event)
3419 {
3420 	unsigned long flags;
3421 
3422 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3423 	if (!list_empty(&fw_event->list)) {
3424 		list_del_init(&fw_event->list);
3425 		fw_event_work_put(fw_event);
3426 	}
3427 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3428 }
3429 
3430 
3431  /**
3432  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3433  * @ioc: per adapter object
3434  * @event_data: trigger event data
3435  */
3436 void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER * ioc,struct SL_WH_TRIGGERS_EVENT_DATA_T * event_data)3437 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3438 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3439 {
3440 	struct fw_event_work *fw_event;
3441 	u16 sz;
3442 
3443 	if (ioc->is_driver_loading)
3444 		return;
3445 	sz = sizeof(*event_data);
3446 	fw_event = alloc_fw_event_work(sz);
3447 	if (!fw_event)
3448 		return;
3449 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3450 	fw_event->ioc = ioc;
3451 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3452 	_scsih_fw_event_add(ioc, fw_event);
3453 	fw_event_work_put(fw_event);
3454 }
3455 
3456 /**
3457  * _scsih_error_recovery_delete_devices - remove devices not responding
3458  * @ioc: per adapter object
3459  */
3460 static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER * ioc)3461 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3462 {
3463 	struct fw_event_work *fw_event;
3464 
3465 	if (ioc->is_driver_loading)
3466 		return;
3467 	fw_event = alloc_fw_event_work(0);
3468 	if (!fw_event)
3469 		return;
3470 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3471 	fw_event->ioc = ioc;
3472 	_scsih_fw_event_add(ioc, fw_event);
3473 	fw_event_work_put(fw_event);
3474 }
3475 
3476 /**
3477  * mpt3sas_port_enable_complete - port enable completed (fake event)
3478  * @ioc: per adapter object
3479  */
3480 void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER * ioc)3481 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3482 {
3483 	struct fw_event_work *fw_event;
3484 
3485 	fw_event = alloc_fw_event_work(0);
3486 	if (!fw_event)
3487 		return;
3488 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3489 	fw_event->ioc = ioc;
3490 	_scsih_fw_event_add(ioc, fw_event);
3491 	fw_event_work_put(fw_event);
3492 }
3493 
/**
 * dequeue_next_fw_event - pop the oldest event off ioc->fw_event_list
 * @ioc: per adapter object
 *
 * Return: the removed fw_event, or NULL if the list is empty.  The
 * returned pointer is NOT given a new reference here — the list's
 * reference is dropped on removal, and the caller relies on the
 * still-pending work item's reference to keep the object alive.
 */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct fw_event_work *fw_event = NULL;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&ioc->fw_event_list)) {
		fw_event = list_first_entry(&ioc->fw_event_list,
				struct fw_event_work, list);
		list_del_init(&fw_event->list);
		/* drop the list's reference (see function comment) */
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	return fw_event;
}
3510 
/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete.  Sets ioc->fw_events_cleanup for
 * the duration so event producers can back off.
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	/* nothing to clean, no worker thread, or unsafe context: bail */
	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
	     !ioc->firmware_event_thread || in_interrupt())
		return;

	ioc->fw_events_cleanup = 1;
	/* drain queued events first, then deal with the in-flight one */
	while ((fw_event = dequeue_next_fw_event(ioc)) ||
	     (fw_event = ioc->current_event)) {

		/*
		 * Don't call cancel_work_sync() for current_event
		 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
		 * otherwise we may observe deadlock if current
		 * hard reset issued as part of processing the current_event.
		 *
		 * Original logic of cleaning the current_event is added
		 * for handling the back to back host reset issued by the user.
		 * i.e. during back to back host reset, driver use to process
		 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
		 * event back to back and this made the drives to unregister
		 * the devices from SML.
		 */

		if (fw_event == ioc->current_event &&
		    ioc->current_event->event !=
		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
			ioc->current_event = NULL;
			continue;
		}

		/*
		 * Wait on the fw_event to complete. If this returns 1, then
		 * the event was never executed, and we need a put for the
		 * reference the work had on the fw_event.
		 *
		 * If it did execute, we wait for it to finish, and the put will
		 * happen from _firmware_event_work()
		 */
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

	}
	ioc->fw_events_cleanup = 0;
}
3566 
3567 /**
3568  * _scsih_internal_device_block - block the sdev device
3569  * @sdev: per device object
3570  * @sas_device_priv_data : per device driver private data
3571  *
3572  * make sure device is blocked without error, if not
3573  * print an error
3574  */
3575 static void
_scsih_internal_device_block(struct scsi_device * sdev,struct MPT3SAS_DEVICE * sas_device_priv_data)3576 _scsih_internal_device_block(struct scsi_device *sdev,
3577 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3578 {
3579 	int r = 0;
3580 
3581 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3582 	    sas_device_priv_data->sas_target->handle);
3583 	sas_device_priv_data->block = 1;
3584 
3585 	r = scsi_internal_device_block_nowait(sdev);
3586 	if (r == -EINVAL)
3587 		sdev_printk(KERN_WARNING, sdev,
3588 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3589 		    r, sas_device_priv_data->sas_target->handle);
3590 }
3591 
/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 *
 * Make sure the device is unblocked without error; if not, retry
 * by blocking and then unblocking again.
 */

static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
			struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* The device has been set to SDEV_RUNNING by SD layer during
		 * device addition but the request queue is still stopped by
		 * our earlier block call. We need to perform a block again
		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */

		sdev_printk(KERN_WARNING, sdev,
		    "device_unblock failed with return(%d) for handle(0x%04x) "
		    "performing a block followed by an unblock\n",
		    r, sas_device_priv_data->sas_target->handle);
		/* force SDEV_BLOCK so the unblock below can take effect */
		sas_device_priv_data->block = 1;
		r = scsi_internal_device_block_nowait(sdev);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_block "
			    "failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);

		sas_device_priv_data->block = 0;
		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
			    " failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);
	}
}
3635 
3636 /**
3637  * _scsih_ublock_io_all_device - unblock every device
3638  * @ioc: per adapter object
3639  *
3640  * change the device state from block to running
3641  */
3642 static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER * ioc)3643 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3644 {
3645 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3646 	struct scsi_device *sdev;
3647 
3648 	shost_for_each_device(sdev, ioc->shost) {
3649 		sas_device_priv_data = sdev->hostdata;
3650 		if (!sas_device_priv_data)
3651 			continue;
3652 		if (!sas_device_priv_data->block)
3653 			continue;
3654 
3655 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3656 			"device_running, handle(0x%04x)\n",
3657 		    sas_device_priv_data->sas_target->handle));
3658 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3659 	}
3660 }
3661 
3662 
3663 /**
3664  * _scsih_ublock_io_device - prepare device to be deleted
3665  * @ioc: per adapter object
3666  * @sas_address: sas address
3667  *
3668  * unblock then put device in offline state
3669  */
3670 static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER * ioc,u64 sas_address)3671 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3672 {
3673 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3674 	struct scsi_device *sdev;
3675 
3676 	shost_for_each_device(sdev, ioc->shost) {
3677 		sas_device_priv_data = sdev->hostdata;
3678 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3679 			continue;
3680 		if (sas_device_priv_data->sas_target->sas_address
3681 		    != sas_address)
3682 			continue;
3683 		if (sas_device_priv_data->block)
3684 			_scsih_internal_device_unblock(sdev,
3685 				sas_device_priv_data);
3686 	}
3687 }
3688 
3689 /**
3690  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3691  * @ioc: per adapter object
3692  *
3693  * During device pull we need to appropriately set the sdev state.
3694  */
3695 static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER * ioc)3696 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3697 {
3698 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3699 	struct scsi_device *sdev;
3700 
3701 	shost_for_each_device(sdev, ioc->shost) {
3702 		sas_device_priv_data = sdev->hostdata;
3703 		if (!sas_device_priv_data)
3704 			continue;
3705 		if (sas_device_priv_data->block)
3706 			continue;
3707 		if (sas_device_priv_data->ignore_delay_remove) {
3708 			sdev_printk(KERN_INFO, sdev,
3709 			"%s skip device_block for SES handle(0x%04x)\n",
3710 			__func__, sas_device_priv_data->sas_target->handle);
3711 			continue;
3712 		}
3713 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3714 	}
3715 }
3716 
3717 /**
3718  * _scsih_block_io_device - set the device state to SDEV_BLOCK
3719  * @ioc: per adapter object
3720  * @handle: device handle
3721  *
3722  * During device pull we need to appropriately set the sdev state.
3723  */
3724 static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)3725 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3726 {
3727 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3728 	struct scsi_device *sdev;
3729 	struct _sas_device *sas_device;
3730 
3731 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3732 
3733 	shost_for_each_device(sdev, ioc->shost) {
3734 		sas_device_priv_data = sdev->hostdata;
3735 		if (!sas_device_priv_data)
3736 			continue;
3737 		if (sas_device_priv_data->sas_target->handle != handle)
3738 			continue;
3739 		if (sas_device_priv_data->block)
3740 			continue;
3741 		if (sas_device && sas_device->pend_sas_rphy_add)
3742 			continue;
3743 		if (sas_device_priv_data->ignore_delay_remove) {
3744 			sdev_printk(KERN_INFO, sdev,
3745 			"%s skip device_block for SES handle(0x%04x)\n",
3746 			__func__, sas_device_priv_data->sas_target->handle);
3747 			continue;
3748 		}
3749 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3750 	}
3751 
3752 	if (sas_device)
3753 		sas_device_put(sas_device);
3754 }
3755 
/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;

	/* Pass 1: record the handle of every end device attached to this
	 * expander in the blocking_handles bitmap; the caller
	 * (_scsih_check_topo_delete_events) walks that bitmap and does
	 * the actual device_block.
	 */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
			    mpt3sas_port->remote_identify.sas_address);
			if (sas_device) {
				set_bit(sas_device->handle,
						ioc->blocking_handles);
				sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	/* Pass 2: recurse into any child (edge/fanout) expanders so their
	 * attached end devices are collected as well.
	 */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {

		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}
3808 
3809 /**
3810  * _scsih_block_io_to_children_attached_directly
3811  * @ioc: per adapter object
3812  * @event_data: topology change event data
3813  *
3814  * This routine set sdev state to SDEV_BLOCK for all devices
3815  * direct attached during device pull.
3816  */
3817 static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)3818 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3819 	Mpi2EventDataSasTopologyChangeList_t *event_data)
3820 {
3821 	int i;
3822 	u16 handle;
3823 	u16 reason_code;
3824 
3825 	for (i = 0; i < event_data->NumEntries; i++) {
3826 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3827 		if (!handle)
3828 			continue;
3829 		reason_code = event_data->PHY[i].PhyStatus &
3830 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
3831 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3832 			_scsih_block_io_device(ioc, handle);
3833 	}
3834 }
3835 
3836 /**
3837  * _scsih_block_io_to_pcie_children_attached_directly
3838  * @ioc: per adapter object
3839  * @event_data: topology change event data
3840  *
3841  * This routine set sdev state to SDEV_BLOCK for all devices
3842  * direct attached during device pull/reconnect.
3843  */
3844 static void
_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)3845 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3846 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3847 {
3848 	int i;
3849 	u16 handle;
3850 	u16 reason_code;
3851 
3852 	for (i = 0; i < event_data->NumEntries; i++) {
3853 		handle =
3854 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3855 		if (!handle)
3856 			continue;
3857 		reason_code = event_data->PortEntry[i].PortStatus;
3858 		if (reason_code ==
3859 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3860 			_scsih_block_io_device(ioc, handle);
3861 	}
3862 }
/**
 * _scsih_tm_tr_send - send task management request
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This code is to initiate the device removal handshake protocol
 * with controller firmware.  This function will issue target reset
 * using high priority request queue.  It will send a sas iounit
 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
 *
 * This is designed to send multiple task management request at the same
 * time to the fifo. If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	u64 sas_address = 0;
	unsigned long flags;
	struct _tr_list *delayed_tr;
	u32 ioc_state;
	u8 tr_method = 0;

	/* Bail out early if the host cannot take new requests. */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}

	/* if PD, then return */
	if (test_bit(handle, ioc->pd_handles))
		return;

	clear_bit(handle, ioc->pend_os_device_add);

	/* Mark the target deleted under the device lock and remember its
	 * address for the unblock further down.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device && sas_device->starget &&
	    sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		sas_address = sas_device->sas_address;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device) {
		/* Not a SAS device: try the PCIe (NVMe) device list. */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
		if (pcie_device && pcie_device->starget &&
			pcie_device->starget->hostdata) {
			sas_target_priv_data = pcie_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			sas_address = pcie_device->wwid;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		/* Pick the reset method carried in MsgFlags for PCIe
		 * devices that are not PCIe-SCSI.
		 */
		if (pcie_device && (!ioc->tm_custom_handling) &&
		    (!(mpt3sas_scsih_is_pcie_scsi_device(
		    pcie_device->device_info))))
			tr_method =
			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
		else
			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	}
	if (sas_target_priv_data) {
		dewtprintk(ioc,
			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
				    handle, (u64)sas_address));
		if (sas_device) {
			if (sas_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
						    (u64)sas_device->enclosure_logical_id,
						    sas_device->slot));
			if (sas_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
						    sas_device->enclosure_level,
						    sas_device->connector_name));
		} else if (pcie_device) {
			if (pcie_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
						    (u64)pcie_device->enclosure_logical_id,
						    pcie_device->slot));
			if (pcie_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
						    pcie_device->enclosure_level,
						    pcie_device->connector_name));
		}
		/* Unblock any held-back I/O and invalidate the firmware
		 * handle on the target.
		 */
		_scsih_ublock_io_device(ioc, sas_address);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* No free high-priority smid: queue the handle and let a later
	 * completion re-issue it (see _scsih_check_for_pending_tm).
	 */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			goto out;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		goto out;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	set_bit(handle, ioc->device_remove_in_progress);
	ioc->put_smid_hi_priority(ioc, smid, 0);
	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

out:
	/* Drop the references taken by the lookup helpers above. */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
}
4003 
/**
 * _scsih_tm_tr_complete -
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the target reset completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct _sc_list *delayed_sc;

	/* Skip the follow-up request if the host cannot accept it. */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}
	/* Sanity check: the reply must match the request it completes. */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle,
				   le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* No free smid for the SAS IO unit control request: queue the
	 * handle on delayed_sc_list; it will be issued from a later
	 * internal-command completion
	 * (mpt3sas_check_for_pending_internal_cmds).
	 */
	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return _scsih_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
				    handle));
		return _scsih_check_for_pending_tm(ioc, smid);
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);

	return _scsih_check_for_pending_tm(ioc, smid);
}
4094 
4095 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4096  *				 issue to IOC or not.
4097  * @ioc: per adapter object
4098  * @scmd: pointer to scsi command object
4099  *
4100  * Returns true if scmd can be issued to IOC otherwise returns false.
4101  */
_scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd)4102 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4103 	struct scsi_cmnd *scmd)
4104 {
4105 
4106 	if (ioc->pci_error_recovery)
4107 		return false;
4108 
4109 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4110 		if (ioc->remove_host)
4111 			return false;
4112 
4113 		return true;
4114 	}
4115 
4116 	if (ioc->remove_host) {
4117 
4118 		switch (scmd->cmnd[0]) {
4119 		case SYNCHRONIZE_CACHE:
4120 		case START_STOP:
4121 			return true;
4122 		default:
4123 			return false;
4124 		}
4125 	}
4126 
4127 	return true;
4128 }
4129 
/**
 * _scsih_sas_control_complete - completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the sas iounit control completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	Mpi2SasIoUnitControlReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (likely(mpi_reply)) {
		dewtprintk(ioc,
			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply->DevHandle), smid,
				    le16_to_cpu(mpi_reply->IOCStatus),
				    le32_to_cpu(mpi_reply->IOCLogInfo)));
		/* Removal succeeded: clear the in-progress bit that was
		 * set in _scsih_tm_tr_send().
		 */
		if (le16_to_cpu(mpi_reply->IOCStatus) ==
		     MPI2_IOCSTATUS_SUCCESS) {
			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
			    ioc->device_remove_in_progress);
		}
	} else {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
	}
	/* Reuse this smid for any queued delayed internal command. */
	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
}
4169 
4170 /**
4171  * _scsih_tm_tr_volume_send - send target reset request for volumes
4172  * @ioc: per adapter object
4173  * @handle: device handle
4174  * Context: interrupt time.
4175  *
4176  * This is designed to send muliple task management request at the same
4177  * time to the fifo. If the fifo is full, we will append the request,
4178  * and process it in a future completion.
4179  */
4180 static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER * ioc,u16 handle)4181 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4182 {
4183 	Mpi2SCSITaskManagementRequest_t *mpi_request;
4184 	u16 smid;
4185 	struct _tr_list *delayed_tr;
4186 
4187 	if (ioc->pci_error_recovery) {
4188 		dewtprintk(ioc,
4189 			   ioc_info(ioc, "%s: host reset in progress!\n",
4190 				    __func__));
4191 		return;
4192 	}
4193 
4194 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4195 	if (!smid) {
4196 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4197 		if (!delayed_tr)
4198 			return;
4199 		INIT_LIST_HEAD(&delayed_tr->list);
4200 		delayed_tr->handle = handle;
4201 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4202 		dewtprintk(ioc,
4203 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4204 				    handle));
4205 		return;
4206 	}
4207 
4208 	dewtprintk(ioc,
4209 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4210 			    handle, smid, ioc->tm_tr_volume_cb_idx));
4211 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4212 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4213 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4214 	mpi_request->DevHandle = cpu_to_le16(handle);
4215 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4216 	ioc->put_smid_hi_priority(ioc, smid, 0);
4217 }
4218 
/**
 * _scsih_tm_volume_tr_complete - target reset completion
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	/* Sanity check: the reply must match the request it completes. */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle, le16_to_cpu(mpi_reply->DevHandle),
				   smid));
		return 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* Reuse this smid for any queued delayed target reset. */
	return _scsih_check_for_pending_tm(ioc, smid);
}
4269 
4270 /**
4271  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4272  * @ioc: per adapter object
4273  * @smid: system request message index
4274  * @event: Event ID
4275  * @event_context: used to track events uniquely
4276  *
4277  * Context - processed in interrupt context.
4278  */
4279 static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER * ioc,u16 smid,U16 event,U32 event_context)4280 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4281 				U32 event_context)
4282 {
4283 	Mpi2EventAckRequest_t *ack_request;
4284 	int i = smid - ioc->internal_smid;
4285 	unsigned long flags;
4286 
4287 	/* Without releasing the smid just update the
4288 	 * call back index and reuse the same smid for
4289 	 * processing this delayed request
4290 	 */
4291 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4292 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4293 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4294 
4295 	dewtprintk(ioc,
4296 		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4297 			    le16_to_cpu(event), smid, ioc->base_cb_idx));
4298 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4299 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4300 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4301 	ack_request->Event = event;
4302 	ack_request->EventContext = event_context;
4303 	ack_request->VF_ID = 0;  /* TODO */
4304 	ack_request->VP_ID = 0;
4305 	ioc->put_smid_default(ioc, smid);
4306 }
4307 
/**
 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
 *				sas_io_unit_ctrl messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
					u16 smid, u16 handle)
{
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u32 ioc_state;
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	/* Nothing to send if the host is going away or not operational. */
	if (ioc->remove_host) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host has been removed\n",
				    __func__));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return;
	}

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_sas_control_cb_idx));
	/* Build the MPI2_SAS_OP_REMOVE_DEVICE request and post it. */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = cpu_to_le16(handle);
	ioc->put_smid_default(ioc, smid);
}
4363 
4364 /**
4365  * _scsih_check_for_pending_internal_cmds - check for pending internal messages
4366  * @ioc: per adapter object
4367  * @smid: system request message index
4368  *
4369  * Context: Executed in interrupt context
4370  *
4371  * This will check delayed internal messages list, and process the
4372  * next request.
4373  *
4374  * Return: 1 meaning mf should be freed from _base_interrupt
4375  *         0 means the mf is freed from this function.
4376  */
4377 u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER * ioc,u16 smid)4378 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4379 {
4380 	struct _sc_list *delayed_sc;
4381 	struct _event_ack_list *delayed_event_ack;
4382 
4383 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4384 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4385 						struct _event_ack_list, list);
4386 		_scsih_issue_delayed_event_ack(ioc, smid,
4387 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4388 		list_del(&delayed_event_ack->list);
4389 		kfree(delayed_event_ack);
4390 		return 0;
4391 	}
4392 
4393 	if (!list_empty(&ioc->delayed_sc_list)) {
4394 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4395 						struct _sc_list, list);
4396 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4397 						 delayed_sc->handle);
4398 		list_del(&delayed_sc->list);
4399 		kfree(delayed_sc);
4400 		return 0;
4401 	}
4402 	return 1;
4403 }
4404 
4405 /**
4406  * _scsih_check_for_pending_tm - check for pending task management
4407  * @ioc: per adapter object
4408  * @smid: system request message index
4409  *
4410  * This will check delayed target reset list, and feed the
4411  * next reqeust.
4412  *
4413  * Return: 1 meaning mf should be freed from _base_interrupt
4414  *         0 means the mf is freed from this function.
4415  */
4416 static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER * ioc,u16 smid)4417 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4418 {
4419 	struct _tr_list *delayed_tr;
4420 
4421 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4422 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4423 		    struct _tr_list, list);
4424 		mpt3sas_base_free_smid(ioc, smid);
4425 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4426 		list_del(&delayed_tr->list);
4427 		kfree(delayed_tr);
4428 		return 0;
4429 	}
4430 
4431 	if (!list_empty(&ioc->delayed_tr_list)) {
4432 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4433 		    struct _tr_list, list);
4434 		mpt3sas_base_free_smid(ioc, smid);
4435 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4436 		list_del(&delayed_tr->list);
4437 		kfree(delayed_tr);
4438 		return 0;
4439 	}
4440 
4441 	return 1;
4442 }
4443 
/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot.  When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* Start the removal handshake for every entry reported as
	 * target-not-responding.
	 */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* An expander handle below num_phys means the event came from
	 * the HBA itself, i.e. direct-attached devices.
	 */
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* block every handle collected in the blocking_handles
		 * bitmap, clearing each bit as it is consumed
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
				   fw_event->event_data;
		/* void queued add/responding events for the expander that
		 * just went away
		 */
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4524 
/**
 * _scsih_check_pcie_topo_remove_events - sanity check on topo
 * events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This handles the case where driver receives multiple switch
 * or device add and delete events in a single shot.  When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
 */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
	unsigned long flags;
	int i, reason_code;
	u16 handle, switch_handle;

	/*
	 * Pass 1: send a target reset (TM TR) for every port entry whose
	 * attached device is reported as not responding.
	 */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* a zero switch handle means devices attach directly to the IOC */
	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
	if (!switch_handle) {
		_scsih_block_io_to_pcie_children_attached_directly(
							ioc, event_data);
		return;
	}
    /* TODO We are not supporting cascaded PCIe Switch removal yet*/
	if ((event_data->SwitchStatus
		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
		(event_data->SwitchStatus ==
					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
		_scsih_block_io_to_pcie_children_attached_directly(
							ioc, event_data);

	/*
	 * NOTE(review): SwitchStatus is compared against MPI2 *SAS*
	 * topology constants here and below; presumably the MPI26 PCIe
	 * switch-status values are numerically identical - confirm
	 * against MPI26_EVENT_PCIE_TOPO_SS_* in the MPI headers.
	 */
	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
			fw_event->ignore)
			continue;
		local_event_data =
			(Mpi26EventDataPCIeTopologyChangeList_t *)
			fw_event->event_data;
		if (local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
				switch_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4596 
4597 /**
4598  * _scsih_set_volume_delete_flag - setting volume delete flag
4599  * @ioc: per adapter object
4600  * @handle: device handle
4601  *
4602  * This returns nothing.
4603  */
4604 static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)4605 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4606 {
4607 	struct _raid_device *raid_device;
4608 	struct MPT3SAS_TARGET *sas_target_priv_data;
4609 	unsigned long flags;
4610 
4611 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4612 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4613 	if (raid_device && raid_device->starget &&
4614 	    raid_device->starget->hostdata) {
4615 		sas_target_priv_data =
4616 		    raid_device->starget->hostdata;
4617 		sas_target_priv_data->deleted = 1;
4618 		dewtprintk(ioc,
4619 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4620 				    handle, (u64)raid_device->wwid));
4621 	}
4622 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4623 }
4624 
4625 /**
4626  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4627  * @handle: input handle
4628  * @a: handle for volume a
4629  * @b: handle for volume b
4630  *
4631  * IR firmware only supports two raid volumes.  The purpose of this
4632  * routine is to set the volume handle in either a or b. When the given
4633  * input handle is non-zero, or when a and b have not been set before.
4634  */
4635 static void
_scsih_set_volume_handle_for_tr(u16 handle,u16 * a,u16 * b)4636 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4637 {
4638 	if (!handle || handle == *a || handle == *b)
4639 		return;
4640 	if (!*a)
4641 		*a = handle;
4642 	else if (!*b)
4643 		*b = handle;
4644 }
4645 
4646 /**
4647  * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4648  * @ioc: per adapter object
4649  * @event_data: the event data payload
4650  * Context: interrupt time.
4651  *
4652  * This routine will send target reset to volume, followed by target
4653  * resets to the PDs. This is called when a PD has been removed, or
4654  * volume has been deleted or removed. When the target reset is sent
4655  * to volume, the PD target resets need to be queued to start upon
4656  * completion of the volume target reset.
4657  */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	/* a/b hold the (at most two) volume handles needing a target reset */
	a = 0;
	b = 0;

	/* RAID functionality is hidden from the upper layer on warpdrive */
	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* fire the volume target resets collected above */
	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		/* the disk is no longer a hidden RAID component */
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			/*
			 * Parent volume reset is in flight: queue the PD
			 * reset to run after the volume reset completes.
			 * NOTE(review): BUG_ON on GFP_ATOMIC allocation
			 * failure crashes the system - kept as-is.
			 */
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
					    handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}
4729 
4730 
4731 /**
4732  * _scsih_check_volume_delete_events - set delete flag for volumes
4733  * @ioc: per adapter object
4734  * @event_data: the event data payload
4735  * Context: interrupt time.
4736  *
4737  * This will handle the case when the cable connected to entire volume is
4738  * pulled. We will take care of setting the deleted flag so normal IO will
4739  * not be sent.
4740  */
4741 static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrVolume_t * event_data)4742 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4743 	Mpi2EventDataIrVolume_t *event_data)
4744 {
4745 	u32 state;
4746 
4747 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4748 		return;
4749 	state = le32_to_cpu(event_data->NewValue);
4750 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4751 	    MPI2_RAID_VOL_STATE_FAILED)
4752 		_scsih_set_volume_delete_flag(ioc,
4753 		    le16_to_cpu(event_data->VolDevHandle));
4754 }
4755 
4756 /**
4757  * _scsih_temp_threshold_events - display temperature threshold exceeded events
4758  * @ioc: per adapter object
4759  * @event_data: the temp threshold event data
4760  * Context: interrupt time.
4761  */
static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataTemperature_t *event_data)
{
	u32 doorbell;
	/* ignore events for sensors the IOC did not report as present */
	if (ioc->temp_sensors_count >= event_data->SensorNum) {
		/* Status bits 0-3 flag which threshold(s) were exceeded */
		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
			event_data->SensorNum);
		ioc_err(ioc, "Current Temp In Celsius: %d\n",
			event_data->CurrentTemperature);
		/*
		 * On gen3+ parts also report a fault/coredump state that
		 * may have resulted from the thermal condition.
		 */
		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_print_fault_code(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_COREDUMP) {
				mpt3sas_print_coredump_info(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			}
		}
	}
}
4790 
/*
 * Track whether an ATA pass-through command is outstanding on the device,
 * using bit 0 of ata_command_pending.  Returns nonzero only when setting
 * the flag and it was already set (i.e. another ATA command is in flight).
 */
static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *dev_priv = scmd->device->hostdata;
	u8 opcode = scmd->cmnd[0];

	/* only ATA pass-through commands participate in this tracking */
	if (opcode != ATA_12 && opcode != ATA_16)
		return 0;

	if (!pending) {
		clear_bit(0, &dev_priv->ata_command_pending);
		return 0;
	}

	return test_and_set_bit(0, &dev_priv->ata_command_pending);
}
4804 
4805 /**
4806  * _scsih_flush_running_cmds - completing outstanding commands.
4807  * @ioc: per adapter object
4808  *
4809  * The flushing out of all pending scmd commands following host reset,
4810  * where all IO is dropped to the floor.
4811  */
4812 static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER * ioc)4813 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4814 {
4815 	struct scsi_cmnd *scmd;
4816 	struct scsiio_tracker *st;
4817 	u16 smid;
4818 	int count = 0;
4819 
4820 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4821 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4822 		if (!scmd)
4823 			continue;
4824 		count++;
4825 		_scsih_set_satl_pending(scmd, false);
4826 		st = scsi_cmd_priv(scmd);
4827 		mpt3sas_base_clear_st(ioc, st);
4828 		scsi_dma_unmap(scmd);
4829 		if (ioc->pci_error_recovery || ioc->remove_host)
4830 			scmd->result = DID_NO_CONNECT << 16;
4831 		else
4832 			scmd->result = DID_RESET << 16;
4833 		scmd->scsi_done(scmd);
4834 	}
4835 	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4836 }
4837 
4838 /**
4839  * _scsih_setup_eedp - setup MPI request for EEDP transfer
4840  * @ioc: per adapter object
4841  * @scmd: pointer to scsi command object
4842  * @mpi_request: pointer to the SCSI_IO request message frame
4843  *
4844  * Supporting protection 1 and 3.
4845  */
4846 static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd,Mpi25SCSIIORequest_t * mpi_request)4847 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4848 	Mpi25SCSIIORequest_t *mpi_request)
4849 {
4850 	u16 eedp_flags;
4851 	unsigned char prot_op = scsi_get_prot_op(scmd);
4852 	unsigned char prot_type = scsi_get_prot_type(scmd);
4853 	Mpi25SCSIIORequest_t *mpi_request_3v =
4854 	   (Mpi25SCSIIORequest_t *)mpi_request;
4855 
4856 	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4857 		return;
4858 
4859 	if (prot_op ==  SCSI_PROT_READ_STRIP)
4860 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
4861 	else if (prot_op ==  SCSI_PROT_WRITE_INSERT)
4862 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4863 	else
4864 		return;
4865 
4866 	switch (prot_type) {
4867 	case SCSI_PROT_DIF_TYPE1:
4868 	case SCSI_PROT_DIF_TYPE2:
4869 
4870 		/*
4871 		* enable ref/guard checking
4872 		* auto increment ref tag
4873 		*/
4874 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4875 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4876 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4877 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4878 		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
4879 		break;
4880 
4881 	case SCSI_PROT_DIF_TYPE3:
4882 
4883 		/*
4884 		* enable guard checking
4885 		*/
4886 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4887 
4888 		break;
4889 	}
4890 
4891 	mpi_request_3v->EEDPBlockSize =
4892 	    cpu_to_le16(scmd->device->sector_size);
4893 
4894 	if (ioc->is_gen35_ioc)
4895 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4896 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
4897 }
4898 
4899 /**
4900  * _scsih_eedp_error_handling - return sense code for EEDP errors
4901  * @scmd: pointer to scsi command object
4902  * @ioc_status: ioc status
4903  */
4904 static void
_scsih_eedp_error_handling(struct scsi_cmnd * scmd,u16 ioc_status)4905 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4906 {
4907 	u8 ascq;
4908 
4909 	switch (ioc_status) {
4910 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4911 		ascq = 0x01;
4912 		break;
4913 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4914 		ascq = 0x02;
4915 		break;
4916 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4917 		ascq = 0x03;
4918 		break;
4919 	default:
4920 		ascq = 0x00;
4921 		break;
4922 	}
4923 	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4924 	    ascq);
4925 	scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4926 	    SAM_STAT_CHECK_CONDITION;
4927 }
4928 
4929 /**
4930  * scsih_qcmd - main scsi request entry point
4931  * @shost: SCSI host pointer
4932  * @scmd: pointer to scsi command object
4933  *
4934  * The callback index is set inside `ioc->scsi_io_cb_idx`.
4935  *
4936  * Return: 0 on success.  If there's a failure, return either:
4937  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4938  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4939  */
4940 static int
scsih_qcmd(struct Scsi_Host * shost,struct scsi_cmnd * scmd)4941 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4942 {
4943 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4944 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4945 	struct MPT3SAS_TARGET *sas_target_priv_data;
4946 	struct _raid_device *raid_device;
4947 	struct request *rq = scmd->request;
4948 	int class;
4949 	Mpi25SCSIIORequest_t *mpi_request;
4950 	struct _pcie_device *pcie_device = NULL;
4951 	u32 mpi_control;
4952 	u16 smid;
4953 	u16 handle;
4954 
4955 	if (ioc->logging_level & MPT_DEBUG_SCSI)
4956 		scsi_print_command(scmd);
4957 
4958 	sas_device_priv_data = scmd->device->hostdata;
4959 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4960 		scmd->result = DID_NO_CONNECT << 16;
4961 		scmd->scsi_done(scmd);
4962 		return 0;
4963 	}
4964 
4965 	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4966 		scmd->result = DID_NO_CONNECT << 16;
4967 		scmd->scsi_done(scmd);
4968 		return 0;
4969 	}
4970 
4971 	sas_target_priv_data = sas_device_priv_data->sas_target;
4972 
4973 	/* invalid device handle */
4974 	handle = sas_target_priv_data->handle;
4975 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4976 		scmd->result = DID_NO_CONNECT << 16;
4977 		scmd->scsi_done(scmd);
4978 		return 0;
4979 	}
4980 
4981 
4982 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4983 		/* host recovery or link resets sent via IOCTLs */
4984 		return SCSI_MLQUEUE_HOST_BUSY;
4985 	} else if (sas_target_priv_data->deleted) {
4986 		/* device has been deleted */
4987 		scmd->result = DID_NO_CONNECT << 16;
4988 		scmd->scsi_done(scmd);
4989 		return 0;
4990 	} else if (sas_target_priv_data->tm_busy ||
4991 		   sas_device_priv_data->block) {
4992 		/* device busy with task management */
4993 		return SCSI_MLQUEUE_DEVICE_BUSY;
4994 	}
4995 
4996 	/*
4997 	 * Bug work around for firmware SATL handling.  The loop
4998 	 * is based on atomic operations and ensures consistency
4999 	 * since we're lockless at this point
5000 	 */
5001 	do {
5002 		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5003 			return SCSI_MLQUEUE_DEVICE_BUSY;
5004 	} while (_scsih_set_satl_pending(scmd, true));
5005 
5006 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5007 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
5008 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5009 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5010 	else
5011 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5012 
5013 	/* set tags */
5014 	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5015 	/* NCQ Prio supported, make sure control indicated high priority */
5016 	if (sas_device_priv_data->ncq_prio_enable) {
5017 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5018 		if (class == IOPRIO_CLASS_RT)
5019 			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5020 	}
5021 	/* Make sure Device is not raid volume.
5022 	 * We do not expose raid functionality to upper layer for warpdrive.
5023 	 */
5024 	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5025 		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
5026 		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5027 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5028 
5029 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5030 	if (!smid) {
5031 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5032 		_scsih_set_satl_pending(scmd, false);
5033 		goto out;
5034 	}
5035 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5036 	memset(mpi_request, 0, ioc->request_sz);
5037 	_scsih_setup_eedp(ioc, scmd, mpi_request);
5038 
5039 	if (scmd->cmd_len == 32)
5040 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5041 	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5042 	if (sas_device_priv_data->sas_target->flags &
5043 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
5044 		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5045 	else
5046 		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5047 	mpi_request->DevHandle = cpu_to_le16(handle);
5048 	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5049 	mpi_request->Control = cpu_to_le32(mpi_control);
5050 	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5051 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5052 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5053 	mpi_request->SenseBufferLowAddress =
5054 	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5055 	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5056 	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5057 	    mpi_request->LUN);
5058 	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5059 
5060 	if (mpi_request->DataLength) {
5061 		pcie_device = sas_target_priv_data->pcie_dev;
5062 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5063 			mpt3sas_base_free_smid(ioc, smid);
5064 			_scsih_set_satl_pending(scmd, false);
5065 			goto out;
5066 		}
5067 	} else
5068 		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5069 
5070 	raid_device = sas_target_priv_data->raid_device;
5071 	if (raid_device && raid_device->direct_io_enabled)
5072 		mpt3sas_setup_direct_io(ioc, scmd,
5073 			raid_device, mpi_request);
5074 
5075 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5076 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5077 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5078 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5079 			ioc->put_smid_fast_path(ioc, smid, handle);
5080 		} else
5081 			ioc->put_smid_scsi_io(ioc, smid,
5082 			    le16_to_cpu(mpi_request->DevHandle));
5083 	} else
5084 		ioc->put_smid_default(ioc, smid);
5085 	return 0;
5086 
5087  out:
5088 	return SCSI_MLQUEUE_HOST_BUSY;
5089 }
5090 
5091 /**
5092  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5093  * @sense_buffer: sense data returned by target
5094  * @data: normalized skey/asc/ascq
5095  */
5096 static void
_scsih_normalize_sense(char * sense_buffer,struct sense_info * data)5097 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5098 {
5099 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
5100 		/* descriptor format */
5101 		data->skey = sense_buffer[1] & 0x0F;
5102 		data->asc = sense_buffer[2];
5103 		data->ascq = sense_buffer[3];
5104 	} else {
5105 		/* fixed format */
5106 		data->skey = sense_buffer[2] & 0x0F;
5107 		data->asc = sense_buffer[12];
5108 		data->ascq = sense_buffer[13];
5109 	}
5110 }
5111 
5112 /**
5113  * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
5114  * @ioc: per adapter object
5115  * @scmd: pointer to scsi command object
5116  * @mpi_reply: reply mf payload returned from firmware
5117  * @smid: ?
5118  *
5119  * scsi_status - SCSI Status code returned from target device
5120  * scsi_state - state info associated with SCSI_IO determined by ioc
5121  * ioc_status - ioc supplied status info
5122  */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* scsi_state description is built in the shared per-ioc scratch
	 * buffer; NOTE(review): presumably never called concurrently -
	 * confirm, since strcat into ioc->tmp_string is not serialized here
	 */
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* NOTE(review): loginfo 0x31170000 is deliberately suppressed -
	 * presumably a benign/expected firmware loginfo; confirm meaning
	 */
	if (log_info == 0x31170000)
		return;

	/* translate ioc_status to a human readable string */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* translate the target's SCSI status byte to a string */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* build a space separated list of the scsi_state flag names */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/* identify the device: RAID volume, NVMe (PCIe), or SAS */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	/* dump the translated status along with the raw reply fields */
	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	/* decode autosense data when the IOC captured it */
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}
5323 
5324 /**
5325  * _scsih_turn_on_pfa_led - illuminate PFA LED
5326  * @ioc: per adapter object
5327  * @handle: device handle
5328  * Context: process
5329  */
5330 static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5331 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5332 {
5333 	Mpi2SepReply_t mpi_reply;
5334 	Mpi2SepRequest_t mpi_request;
5335 	struct _sas_device *sas_device;
5336 
5337 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5338 	if (!sas_device)
5339 		return;
5340 
5341 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5342 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5343 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5344 	mpi_request.SlotStatus =
5345 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5346 	mpi_request.DevHandle = cpu_to_le16(handle);
5347 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5348 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5349 	    &mpi_request)) != 0) {
5350 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5351 			__FILE__, __LINE__, __func__);
5352 		goto out;
5353 	}
5354 	sas_device->pfa_led_on = 1;
5355 
5356 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5357 		dewtprintk(ioc,
5358 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5359 				    le16_to_cpu(mpi_reply.IOCStatus),
5360 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5361 		goto out;
5362 	}
5363 out:
5364 	sas_device_put(sas_device);
5365 }
5366 
5367 /**
5368  * _scsih_turn_off_pfa_led - turn off Fault LED
5369  * @ioc: per adapter object
5370  * @sas_device: sas device whose PFA LED has to turned off
5371  * Context: process
5372  */
5373 static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)5374 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5375 	struct _sas_device *sas_device)
5376 {
5377 	Mpi2SepReply_t mpi_reply;
5378 	Mpi2SepRequest_t mpi_request;
5379 
5380 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5381 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5382 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5383 	mpi_request.SlotStatus = 0;
5384 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5385 	mpi_request.DevHandle = 0;
5386 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5387 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5388 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5389 		&mpi_request)) != 0) {
5390 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5391 			__FILE__, __LINE__, __func__);
5392 		return;
5393 	}
5394 
5395 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5396 		dewtprintk(ioc,
5397 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5398 				    le16_to_cpu(mpi_reply.IOCStatus),
5399 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5400 		return;
5401 	}
5402 }
5403 
5404 /**
5405  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5406  * @ioc: per adapter object
5407  * @handle: device handle
5408  * Context: interrupt.
5409  */
5410 static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5411 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5412 {
5413 	struct fw_event_work *fw_event;
5414 
5415 	fw_event = alloc_fw_event_work(0);
5416 	if (!fw_event)
5417 		return;
5418 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5419 	fw_event->device_handle = handle;
5420 	fw_event->ioc = ioc;
5421 	_scsih_fw_event_add(ioc, fw_event);
5422 	fw_event_work_put(fw_event);
5423 }
5424 
5425 /**
5426  * _scsih_smart_predicted_fault - process smart errors
5427  * @ioc: per adapter object
5428  * @handle: device handle
5429  * Context: interrupt.
5430  */
5431 static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER * ioc,u16 handle)5432 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5433 {
5434 	struct scsi_target *starget;
5435 	struct MPT3SAS_TARGET *sas_target_priv_data;
5436 	Mpi2EventNotificationReply_t *event_reply;
5437 	Mpi2EventDataSasDeviceStatusChange_t *event_data;
5438 	struct _sas_device *sas_device;
5439 	ssize_t sz;
5440 	unsigned long flags;
5441 
5442 	/* only handle non-raid devices */
5443 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5444 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5445 	if (!sas_device)
5446 		goto out_unlock;
5447 
5448 	starget = sas_device->starget;
5449 	sas_target_priv_data = starget->hostdata;
5450 
5451 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5452 	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5453 		goto out_unlock;
5454 
5455 	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5456 
5457 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5458 
5459 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5460 		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5461 
5462 	/* insert into event log */
5463 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5464 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5465 	event_reply = kzalloc(sz, GFP_ATOMIC);
5466 	if (!event_reply) {
5467 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5468 			__FILE__, __LINE__, __func__);
5469 		goto out;
5470 	}
5471 
5472 	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5473 	event_reply->Event =
5474 	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5475 	event_reply->MsgLength = sz/4;
5476 	event_reply->EventDataLength =
5477 	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5478 	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5479 	    event_reply->EventData;
5480 	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5481 	event_data->ASC = 0x5D;
5482 	event_data->DevHandle = cpu_to_le16(handle);
5483 	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5484 	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5485 	kfree(event_reply);
5486 out:
5487 	if (sas_device)
5488 		sas_device_put(sas_device);
5489 	return;
5490 
5491 out_unlock:
5492 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5493 	goto out;
5494 }
5495 
5496 /**
5497  * _scsih_io_done - scsi request callback
5498  * @ioc: per adapter object
5499  * @smid: system request message index
5500  * @msix_index: MSIX table index supplied by the OS
5501  * @reply: reply message frame(lower 32bit addr)
5502  *
5503  * Callback handler when using _scsih_qcmd.
5504  *
5505  * Return: 1 meaning mf should be freed from _base_interrupt
5506  *         0 means the mf is freed from this function.
5507  */
5508 static u8
_scsih_io_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)5509 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5510 {
5511 	Mpi25SCSIIORequest_t *mpi_request;
5512 	Mpi2SCSIIOReply_t *mpi_reply;
5513 	struct scsi_cmnd *scmd;
5514 	struct scsiio_tracker *st;
5515 	u16 ioc_status;
5516 	u32 xfer_cnt;
5517 	u8 scsi_state;
5518 	u8 scsi_status;
5519 	u32 log_info;
5520 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5521 	u32 response_code = 0;
5522 
5523 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5524 
5525 	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5526 	if (scmd == NULL)
5527 		return 1;
5528 
5529 	_scsih_set_satl_pending(scmd, false);
5530 
5531 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5532 
5533 	if (mpi_reply == NULL) {
5534 		scmd->result = DID_OK << 16;
5535 		goto out;
5536 	}
5537 
5538 	sas_device_priv_data = scmd->device->hostdata;
5539 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5540 	     sas_device_priv_data->sas_target->deleted) {
5541 		scmd->result = DID_NO_CONNECT << 16;
5542 		goto out;
5543 	}
5544 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5545 
5546 	/*
5547 	 * WARPDRIVE: If direct_io is set then it is directIO,
5548 	 * the failed direct I/O should be redirected to volume
5549 	 */
5550 	st = scsi_cmd_priv(scmd);
5551 	if (st->direct_io &&
5552 	     ((ioc_status & MPI2_IOCSTATUS_MASK)
5553 	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5554 		st->direct_io = 0;
5555 		st->scmd = scmd;
5556 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5557 		mpi_request->DevHandle =
5558 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
5559 		ioc->put_smid_scsi_io(ioc, smid,
5560 		    sas_device_priv_data->sas_target->handle);
5561 		return 0;
5562 	}
5563 	/* turning off TLR */
5564 	scsi_state = mpi_reply->SCSIState;
5565 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5566 		response_code =
5567 		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5568 	if (!sas_device_priv_data->tlr_snoop_check) {
5569 		sas_device_priv_data->tlr_snoop_check++;
5570 		if ((!ioc->is_warpdrive &&
5571 		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
5572 		    !scsih_is_nvme(&scmd->device->sdev_gendev))
5573 		    && sas_is_tlr_enabled(scmd->device) &&
5574 		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5575 			sas_disable_tlr(scmd->device);
5576 			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5577 		}
5578 	}
5579 
5580 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5581 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5582 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5583 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
5584 	else
5585 		log_info = 0;
5586 	ioc_status &= MPI2_IOCSTATUS_MASK;
5587 	scsi_status = mpi_reply->SCSIStatus;
5588 
5589 	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5590 	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5591 	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5592 	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5593 		ioc_status = MPI2_IOCSTATUS_SUCCESS;
5594 	}
5595 
5596 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5597 		struct sense_info data;
5598 		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5599 		    smid);
5600 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5601 		    le32_to_cpu(mpi_reply->SenseCount));
5602 		memcpy(scmd->sense_buffer, sense_data, sz);
5603 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5604 		/* failure prediction threshold exceeded */
5605 		if (data.asc == 0x5D)
5606 			_scsih_smart_predicted_fault(ioc,
5607 			    le16_to_cpu(mpi_reply->DevHandle));
5608 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5609 
5610 		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5611 		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5612 		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5613 		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5614 			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5615 	}
5616 	switch (ioc_status) {
5617 	case MPI2_IOCSTATUS_BUSY:
5618 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5619 		scmd->result = SAM_STAT_BUSY;
5620 		break;
5621 
5622 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5623 		scmd->result = DID_NO_CONNECT << 16;
5624 		break;
5625 
5626 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5627 		if (sas_device_priv_data->block) {
5628 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5629 			goto out;
5630 		}
5631 		if (log_info == 0x31110630) {
5632 			if (scmd->retries > 2) {
5633 				scmd->result = DID_NO_CONNECT << 16;
5634 				scsi_device_set_state(scmd->device,
5635 				    SDEV_OFFLINE);
5636 			} else {
5637 				scmd->result = DID_SOFT_ERROR << 16;
5638 				scmd->device->expecting_cc_ua = 1;
5639 			}
5640 			break;
5641 		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5642 			scmd->result = DID_RESET << 16;
5643 			break;
5644 		} else if ((scmd->device->channel == RAID_CHANNEL) &&
5645 		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5646 		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5647 			scmd->result = DID_RESET << 16;
5648 			break;
5649 		}
5650 		scmd->result = DID_SOFT_ERROR << 16;
5651 		break;
5652 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5653 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5654 		scmd->result = DID_RESET << 16;
5655 		break;
5656 
5657 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5658 		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5659 			scmd->result = DID_SOFT_ERROR << 16;
5660 		else
5661 			scmd->result = (DID_OK << 16) | scsi_status;
5662 		break;
5663 
5664 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5665 		scmd->result = (DID_OK << 16) | scsi_status;
5666 
5667 		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5668 			break;
5669 
5670 		if (xfer_cnt < scmd->underflow) {
5671 			if (scsi_status == SAM_STAT_BUSY)
5672 				scmd->result = SAM_STAT_BUSY;
5673 			else
5674 				scmd->result = DID_SOFT_ERROR << 16;
5675 		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5676 		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
5677 			scmd->result = DID_SOFT_ERROR << 16;
5678 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5679 			scmd->result = DID_RESET << 16;
5680 		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5681 			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5682 			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5683 			scmd->result = (DRIVER_SENSE << 24) |
5684 			    SAM_STAT_CHECK_CONDITION;
5685 			scmd->sense_buffer[0] = 0x70;
5686 			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5687 			scmd->sense_buffer[12] = 0x20;
5688 			scmd->sense_buffer[13] = 0;
5689 		}
5690 		break;
5691 
5692 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5693 		scsi_set_resid(scmd, 0);
5694 		fallthrough;
5695 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5696 	case MPI2_IOCSTATUS_SUCCESS:
5697 		scmd->result = (DID_OK << 16) | scsi_status;
5698 		if (response_code ==
5699 		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5700 		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5701 		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5702 			scmd->result = DID_SOFT_ERROR << 16;
5703 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5704 			scmd->result = DID_RESET << 16;
5705 		break;
5706 
5707 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5708 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5709 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5710 		_scsih_eedp_error_handling(scmd, ioc_status);
5711 		break;
5712 
5713 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5714 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5715 	case MPI2_IOCSTATUS_INVALID_SGL:
5716 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
5717 	case MPI2_IOCSTATUS_INVALID_FIELD:
5718 	case MPI2_IOCSTATUS_INVALID_STATE:
5719 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5720 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5721 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5722 	default:
5723 		scmd->result = DID_SOFT_ERROR << 16;
5724 		break;
5725 
5726 	}
5727 
5728 	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5729 		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5730 
5731  out:
5732 
5733 	scsi_dma_unmap(scmd);
5734 	mpt3sas_base_free_smid(ioc, smid);
5735 	scmd->scsi_done(scmd);
5736 	return 0;
5737 }
5738 
5739 /**
5740  * _scsih_sas_host_refresh - refreshing sas host object contents
5741  * @ioc: per adapter object
5742  * Context: user
5743  *
5744  * During port enable, fw will send topology events for every device. Its
5745  * possible that the handles may change from the previous setting, so this
5746  * code keeping handles updating if changed.
5747  */
5748 static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER * ioc)5749 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5750 {
5751 	u16 sz;
5752 	u16 ioc_status;
5753 	int i;
5754 	Mpi2ConfigReply_t mpi_reply;
5755 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5756 	u16 attached_handle;
5757 	u8 link_rate;
5758 
5759 	dtmprintk(ioc,
5760 		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
5761 			   (u64)ioc->sas_hba.sas_address));
5762 
5763 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5764 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
5765 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5766 	if (!sas_iounit_pg0) {
5767 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5768 			__FILE__, __LINE__, __func__);
5769 		return;
5770 	}
5771 
5772 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5773 	    sas_iounit_pg0, sz)) != 0)
5774 		goto out;
5775 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5776 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5777 		goto out;
5778 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5779 		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5780 		if (i == 0)
5781 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5782 			    PhyData[0].ControllerDevHandle);
5783 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5784 		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5785 		    AttachedDevHandle);
5786 		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5787 			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5788 		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5789 		    attached_handle, i, link_rate);
5790 	}
5791  out:
5792 	kfree(sas_iounit_pg0);
5793 }
5794 
5795 /**
5796  * _scsih_sas_host_add - create sas host object
5797  * @ioc: per adapter object
5798  *
5799  * Creating host side data object, stored in ioc->sas_hba
5800  */
5801 static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER * ioc)5802 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5803 {
5804 	int i;
5805 	Mpi2ConfigReply_t mpi_reply;
5806 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5807 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5808 	Mpi2SasPhyPage0_t phy_pg0;
5809 	Mpi2SasDevicePage0_t sas_device_pg0;
5810 	Mpi2SasEnclosurePage0_t enclosure_pg0;
5811 	u16 ioc_status;
5812 	u16 sz;
5813 	u8 device_missing_delay;
5814 	u8 num_phys;
5815 
5816 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5817 	if (!num_phys) {
5818 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5819 			__FILE__, __LINE__, __func__);
5820 		return;
5821 	}
5822 	ioc->sas_hba.phy = kcalloc(num_phys,
5823 	    sizeof(struct _sas_phy), GFP_KERNEL);
5824 	if (!ioc->sas_hba.phy) {
5825 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5826 			__FILE__, __LINE__, __func__);
5827 		goto out;
5828 	}
5829 	ioc->sas_hba.num_phys = num_phys;
5830 
5831 	/* sas_iounit page 0 */
5832 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5833 	    sizeof(Mpi2SasIOUnit0PhyData_t));
5834 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5835 	if (!sas_iounit_pg0) {
5836 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5837 			__FILE__, __LINE__, __func__);
5838 		return;
5839 	}
5840 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5841 	    sas_iounit_pg0, sz))) {
5842 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5843 			__FILE__, __LINE__, __func__);
5844 		goto out;
5845 	}
5846 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5847 	    MPI2_IOCSTATUS_MASK;
5848 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5849 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5850 			__FILE__, __LINE__, __func__);
5851 		goto out;
5852 	}
5853 
5854 	/* sas_iounit page 1 */
5855 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5856 	    sizeof(Mpi2SasIOUnit1PhyData_t));
5857 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5858 	if (!sas_iounit_pg1) {
5859 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5860 			__FILE__, __LINE__, __func__);
5861 		goto out;
5862 	}
5863 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5864 	    sas_iounit_pg1, sz))) {
5865 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5866 			__FILE__, __LINE__, __func__);
5867 		goto out;
5868 	}
5869 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5870 	    MPI2_IOCSTATUS_MASK;
5871 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5872 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5873 			__FILE__, __LINE__, __func__);
5874 		goto out;
5875 	}
5876 
5877 	ioc->io_missing_delay =
5878 	    sas_iounit_pg1->IODeviceMissingDelay;
5879 	device_missing_delay =
5880 	    sas_iounit_pg1->ReportDeviceMissingDelay;
5881 	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5882 		ioc->device_missing_delay = (device_missing_delay &
5883 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5884 	else
5885 		ioc->device_missing_delay = device_missing_delay &
5886 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
5887 
5888 	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
5889 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5890 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5891 		    i))) {
5892 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5893 				__FILE__, __LINE__, __func__);
5894 			goto out;
5895 		}
5896 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5897 		    MPI2_IOCSTATUS_MASK;
5898 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5899 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5900 				__FILE__, __LINE__, __func__);
5901 			goto out;
5902 		}
5903 
5904 		if (i == 0)
5905 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5906 			    PhyData[0].ControllerDevHandle);
5907 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5908 		ioc->sas_hba.phy[i].phy_id = i;
5909 		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5910 		    phy_pg0, ioc->sas_hba.parent_dev);
5911 	}
5912 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5913 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5914 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5915 			__FILE__, __LINE__, __func__);
5916 		goto out;
5917 	}
5918 	ioc->sas_hba.enclosure_handle =
5919 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
5920 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5921 	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5922 		 ioc->sas_hba.handle,
5923 		 (u64)ioc->sas_hba.sas_address,
5924 		 ioc->sas_hba.num_phys);
5925 
5926 	if (ioc->sas_hba.enclosure_handle) {
5927 		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5928 		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5929 		   ioc->sas_hba.enclosure_handle)))
5930 			ioc->sas_hba.enclosure_logical_id =
5931 			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5932 	}
5933 
5934  out:
5935 	kfree(sas_iounit_pg1);
5936 	kfree(sas_iounit_pg0);
5937 }
5938 
5939 /**
5940  * _scsih_expander_add -  creating expander object
5941  * @ioc: per adapter object
5942  * @handle: expander handle
5943  *
5944  * Creating expander object, stored in ioc->sas_expander_list.
5945  *
5946  * Return: 0 for success, else error.
5947  */
5948 static int
_scsih_expander_add(struct MPT3SAS_ADAPTER * ioc,u16 handle)5949 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5950 {
5951 	struct _sas_node *sas_expander;
5952 	struct _enclosure_node *enclosure_dev;
5953 	Mpi2ConfigReply_t mpi_reply;
5954 	Mpi2ExpanderPage0_t expander_pg0;
5955 	Mpi2ExpanderPage1_t expander_pg1;
5956 	u32 ioc_status;
5957 	u16 parent_handle;
5958 	u64 sas_address, sas_address_parent = 0;
5959 	int i;
5960 	unsigned long flags;
5961 	struct _sas_port *mpt3sas_port = NULL;
5962 
5963 	int rc = 0;
5964 
5965 	if (!handle)
5966 		return -1;
5967 
5968 	if (ioc->shost_recovery || ioc->pci_error_recovery)
5969 		return -1;
5970 
5971 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5972 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5973 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5974 			__FILE__, __LINE__, __func__);
5975 		return -1;
5976 	}
5977 
5978 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5979 	    MPI2_IOCSTATUS_MASK;
5980 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5981 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5982 			__FILE__, __LINE__, __func__);
5983 		return -1;
5984 	}
5985 
5986 	/* handle out of order topology events */
5987 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5988 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5989 	    != 0) {
5990 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5991 			__FILE__, __LINE__, __func__);
5992 		return -1;
5993 	}
5994 	if (sas_address_parent != ioc->sas_hba.sas_address) {
5995 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
5996 		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5997 		    sas_address_parent);
5998 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5999 		if (!sas_expander) {
6000 			rc = _scsih_expander_add(ioc, parent_handle);
6001 			if (rc != 0)
6002 				return rc;
6003 		}
6004 	}
6005 
6006 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6007 	sas_address = le64_to_cpu(expander_pg0.SASAddress);
6008 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6009 	    sas_address);
6010 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6011 
6012 	if (sas_expander)
6013 		return 0;
6014 
6015 	sas_expander = kzalloc(sizeof(struct _sas_node),
6016 	    GFP_KERNEL);
6017 	if (!sas_expander) {
6018 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6019 			__FILE__, __LINE__, __func__);
6020 		return -1;
6021 	}
6022 
6023 	sas_expander->handle = handle;
6024 	sas_expander->num_phys = expander_pg0.NumPhys;
6025 	sas_expander->sas_address_parent = sas_address_parent;
6026 	sas_expander->sas_address = sas_address;
6027 
6028 	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6029 		 handle, parent_handle,
6030 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
6031 
6032 	if (!sas_expander->num_phys) {
6033 		rc = -1;
6034 		goto out_fail;
6035 	}
6036 	sas_expander->phy = kcalloc(sas_expander->num_phys,
6037 	    sizeof(struct _sas_phy), GFP_KERNEL);
6038 	if (!sas_expander->phy) {
6039 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6040 			__FILE__, __LINE__, __func__);
6041 		rc = -1;
6042 		goto out_fail;
6043 	}
6044 
6045 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
6046 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6047 	    sas_address_parent);
6048 	if (!mpt3sas_port) {
6049 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6050 			__FILE__, __LINE__, __func__);
6051 		rc = -1;
6052 		goto out_fail;
6053 	}
6054 	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6055 
6056 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
6057 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6058 		    &expander_pg1, i, handle))) {
6059 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6060 				__FILE__, __LINE__, __func__);
6061 			rc = -1;
6062 			goto out_fail;
6063 		}
6064 		sas_expander->phy[i].handle = handle;
6065 		sas_expander->phy[i].phy_id = i;
6066 
6067 		if ((mpt3sas_transport_add_expander_phy(ioc,
6068 		    &sas_expander->phy[i], expander_pg1,
6069 		    sas_expander->parent_dev))) {
6070 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6071 				__FILE__, __LINE__, __func__);
6072 			rc = -1;
6073 			goto out_fail;
6074 		}
6075 	}
6076 
6077 	if (sas_expander->enclosure_handle) {
6078 		enclosure_dev =
6079 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6080 						sas_expander->enclosure_handle);
6081 		if (enclosure_dev)
6082 			sas_expander->enclosure_logical_id =
6083 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6084 	}
6085 
6086 	_scsih_expander_node_add(ioc, sas_expander);
6087 	return 0;
6088 
6089  out_fail:
6090 
6091 	if (mpt3sas_port)
6092 		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
6093 		    sas_address_parent);
6094 	kfree(sas_expander);
6095 	return rc;
6096 }
6097 
6098 /**
6099  * mpt3sas_expander_remove - removing expander object
6100  * @ioc: per adapter object
6101  * @sas_address: expander sas_address
6102  */
6103 void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER * ioc,u64 sas_address)6104 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
6105 {
6106 	struct _sas_node *sas_expander;
6107 	unsigned long flags;
6108 
6109 	if (ioc->shost_recovery)
6110 		return;
6111 
6112 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6113 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6114 	    sas_address);
6115 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6116 	if (sas_expander)
6117 		_scsih_expander_node_remove(ioc, sas_expander);
6118 }
6119 
6120 /**
6121  * _scsih_done -  internal SCSI_IO callback handler.
6122  * @ioc: per adapter object
6123  * @smid: system request message index
6124  * @msix_index: MSIX table index supplied by the OS
6125  * @reply: reply message frame(lower 32bit addr)
6126  *
6127  * Callback handler when sending internal generated SCSI_IO.
6128  * The callback index passed is `ioc->scsih_cb_idx`
6129  *
6130  * Return: 1 meaning mf should be freed from _base_interrupt
6131  *         0 means the mf is freed from this function.
6132  */
6133 static u8
_scsih_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)6134 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
6135 {
6136 	MPI2DefaultReply_t *mpi_reply;
6137 
6138 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
6139 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
6140 		return 1;
6141 	if (ioc->scsih_cmds.smid != smid)
6142 		return 1;
6143 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
6144 	if (mpi_reply) {
6145 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
6146 		    mpi_reply->MsgLength*4);
6147 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
6148 	}
6149 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
6150 	complete(&ioc->scsih_cmds.done);
6151 	return 1;
6152 }
6153 
6154 
6155 
6156 
6157 #define MPT3_MAX_LUNS (255)
6158 
6159 
6160 /**
6161  * _scsih_check_access_status - check access flags
6162  * @ioc: per adapter object
6163  * @sas_address: sas address
6164  * @handle: sas device handle
6165  * @access_status: errors returned during discovery of the device
6166  *
6167  * Return: 0 for success, else failure
6168  */
6169 static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,u16 handle,u8 access_status)6170 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6171 	u16 handle, u8 access_status)
6172 {
6173 	u8 rc = 1;
6174 	char *desc = NULL;
6175 
6176 	switch (access_status) {
6177 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
6178 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
6179 		rc = 0;
6180 		break;
6181 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
6182 		desc = "sata capability failed";
6183 		break;
6184 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
6185 		desc = "sata affiliation conflict";
6186 		break;
6187 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
6188 		desc = "route not addressable";
6189 		break;
6190 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
6191 		desc = "smp error not addressable";
6192 		break;
6193 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
6194 		desc = "device blocked";
6195 		break;
6196 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
6197 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
6198 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
6199 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
6200 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
6201 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
6202 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
6203 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
6204 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
6205 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
6206 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
6207 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
6208 		desc = "sata initialization failed";
6209 		break;
6210 	default:
6211 		desc = "unknown";
6212 		break;
6213 	}
6214 
6215 	if (!rc)
6216 		return 0;
6217 
6218 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
6219 		desc, (u64)sas_address, handle);
6220 	return rc;
6221 }
6222 
6223 /**
6224  * _scsih_check_device - checking device responsiveness
6225  * @ioc: per adapter object
6226  * @parent_sas_address: sas address of parent expander or sas host
6227  * @handle: attached device handle
6228  * @phy_number: phy number
6229  * @link_rate: new link rate
6230  */
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device;
	struct _enclosure_node *enclosure_dev = NULL;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;

	/* Read SAS Device Page 0 for this handle; bail out silently on any
	 * config-page failure since this is only a responsiveness check.
	 */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* wide port handling ~ we need only handle device once for the phy that
	 * is matched in sas device page zero
	 */
	if (phy_number != sas_device_pg0.PhyNum)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	/* Look the device up by SAS address under the device-list lock; the
	 * lock is held across the cached-state updates below.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address);

	if (!sas_device)
		goto out_unlock;

	/* Firmware may have re-assigned the device handle (e.g. across a
	 * reset); refresh the cached handle and enclosure data.
	 */
	if (unlikely(sas_device->handle != handle)) {
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
			"handle changed from(0x%04x) to (0x%04x)!!!\n",
			sas_device->handle, handle);
		sas_target_priv_data->handle = handle;
		sas_device->handle = handle;
		if (le16_to_cpu(sas_device_pg0.Flags) &
		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
				sas_device_pg0.EnclosureLevel;
			memcpy(sas_device->connector_name,
				sas_device_pg0.ConnectorName, 4);
			sas_device->connector_name[4] = '\0';
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
				le16_to_cpu(sas_device_pg0.EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
						sas_device->enclosure_handle);
		if (enclosure_dev) {
			sas_device->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}
	}

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
			handle);
		goto out_unlock;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		goto out_unlock;

	/* Device responded and looks healthy: drop the lock first, then
	 * resume I/O that was blocked while the device was suspect.
	 */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_ublock_io_device(ioc, sas_address);

	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device_put(sas_device);
}
6335 
6336 /**
6337  * _scsih_add_device -  creating sas device object
6338  * @ioc: per adapter object
6339  * @handle: sas device handle
6340  * @phy_num: phy number end device attached to
6341  * @is_pd: is this hidden raid component
6342  *
6343  * Creating end device object, stored in ioc->sas_device_list.
6344  *
6345  * Return: 0 for success, non-zero for failure.
6346  */
6347 static int
_scsih_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle,u8 phy_num,u8 is_pd)6348 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6349 	u8 is_pd)
6350 {
6351 	Mpi2ConfigReply_t mpi_reply;
6352 	Mpi2SasDevicePage0_t sas_device_pg0;
6353 	struct _sas_device *sas_device;
6354 	struct _enclosure_node *enclosure_dev = NULL;
6355 	u32 ioc_status;
6356 	u64 sas_address;
6357 	u32 device_info;
6358 
6359 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6360 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6361 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6362 			__FILE__, __LINE__, __func__);
6363 		return -1;
6364 	}
6365 
6366 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6367 	    MPI2_IOCSTATUS_MASK;
6368 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6369 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6370 			__FILE__, __LINE__, __func__);
6371 		return -1;
6372 	}
6373 
6374 	/* check if this is end device */
6375 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6376 	if (!(_scsih_is_end_device(device_info)))
6377 		return -1;
6378 	set_bit(handle, ioc->pend_os_device_add);
6379 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6380 
6381 	/* check if device is present */
6382 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6383 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6384 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
6385 			handle);
6386 		return -1;
6387 	}
6388 
6389 	/* check if there were any issues with discovery */
6390 	if (_scsih_check_access_status(ioc, sas_address, handle,
6391 	    sas_device_pg0.AccessStatus))
6392 		return -1;
6393 
6394 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
6395 					sas_address);
6396 	if (sas_device) {
6397 		clear_bit(handle, ioc->pend_os_device_add);
6398 		sas_device_put(sas_device);
6399 		return -1;
6400 	}
6401 
6402 	if (sas_device_pg0.EnclosureHandle) {
6403 		enclosure_dev =
6404 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6405 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
6406 		if (enclosure_dev == NULL)
6407 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
6408 				 sas_device_pg0.EnclosureHandle);
6409 	}
6410 
6411 	sas_device = kzalloc(sizeof(struct _sas_device),
6412 	    GFP_KERNEL);
6413 	if (!sas_device) {
6414 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6415 			__FILE__, __LINE__, __func__);
6416 		return 0;
6417 	}
6418 
6419 	kref_init(&sas_device->refcount);
6420 	sas_device->handle = handle;
6421 	if (_scsih_get_sas_address(ioc,
6422 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
6423 	    &sas_device->sas_address_parent) != 0)
6424 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6425 			__FILE__, __LINE__, __func__);
6426 	sas_device->enclosure_handle =
6427 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
6428 	if (sas_device->enclosure_handle != 0)
6429 		sas_device->slot =
6430 		    le16_to_cpu(sas_device_pg0.Slot);
6431 	sas_device->device_info = device_info;
6432 	sas_device->sas_address = sas_address;
6433 	sas_device->phy = sas_device_pg0.PhyNum;
6434 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6435 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6436 
6437 	if (le16_to_cpu(sas_device_pg0.Flags)
6438 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6439 		sas_device->enclosure_level =
6440 			sas_device_pg0.EnclosureLevel;
6441 		memcpy(sas_device->connector_name,
6442 			sas_device_pg0.ConnectorName, 4);
6443 		sas_device->connector_name[4] = '\0';
6444 	} else {
6445 		sas_device->enclosure_level = 0;
6446 		sas_device->connector_name[0] = '\0';
6447 	}
6448 	/* get enclosure_logical_id & chassis_slot*/
6449 	sas_device->is_chassis_slot_valid = 0;
6450 	if (enclosure_dev) {
6451 		sas_device->enclosure_logical_id =
6452 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6453 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6454 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6455 			sas_device->is_chassis_slot_valid = 1;
6456 			sas_device->chassis_slot =
6457 					enclosure_dev->pg0.ChassisSlot;
6458 		}
6459 	}
6460 
6461 	/* get device name */
6462 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6463 
6464 	if (ioc->wait_for_discovery_to_complete)
6465 		_scsih_sas_device_init_add(ioc, sas_device);
6466 	else
6467 		_scsih_sas_device_add(ioc, sas_device);
6468 
6469 	sas_device_put(sas_device);
6470 	return 0;
6471 }
6472 
6473 /**
6474  * _scsih_remove_device -  removing sas device object
6475  * @ioc: per adapter object
6476  * @sas_device: the sas_device object
6477  */
static void
_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* IBM-branded controllers drive a predictive-failure LED; switch it
	 * off before the device object goes away.
	 */
	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
	     (sas_device->pfa_led_on)) {
		_scsih_turn_off_pfa_led(ioc, sas_device);
		sas_device->pfa_led_on = 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Mark the target deleted and unblock queued I/O so it can be
	 * failed back to the midlayer, then invalidate the cached handle.
	 */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device->sas_address);
		sas_target_priv_data->handle =
		     MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* Skip transport removal when drives are hidden — presumably those
	 * were never registered with the transport layer; confirm.
	 */
	if (!ioc->hide_drives)
		mpt3sas_transport_port_remove(ioc,
		    sas_device->sas_address,
		    sas_device->sas_address_parent);

	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));
	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));
}
6523 
6524 /**
6525  * _scsih_sas_topology_change_event_debug - debug for topology event
6526  * @ioc: per adapter object
6527  * @event_data: event data payload
6528  * Context: user.
6529  */
6530 static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)6531 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6532 	Mpi2EventDataSasTopologyChangeList_t *event_data)
6533 {
6534 	int i;
6535 	u16 handle;
6536 	u16 reason_code;
6537 	u8 phy_number;
6538 	char *status_str = NULL;
6539 	u8 link_rate, prev_link_rate;
6540 
6541 	switch (event_data->ExpStatus) {
6542 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6543 		status_str = "add";
6544 		break;
6545 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6546 		status_str = "remove";
6547 		break;
6548 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6549 	case 0:
6550 		status_str =  "responding";
6551 		break;
6552 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6553 		status_str = "remove delay";
6554 		break;
6555 	default:
6556 		status_str = "unknown status";
6557 		break;
6558 	}
6559 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
6560 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6561 	    "start_phy(%02d), count(%d)\n",
6562 	    le16_to_cpu(event_data->ExpanderDevHandle),
6563 	    le16_to_cpu(event_data->EnclosureHandle),
6564 	    event_data->StartPhyNum, event_data->NumEntries);
6565 	for (i = 0; i < event_data->NumEntries; i++) {
6566 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6567 		if (!handle)
6568 			continue;
6569 		phy_number = event_data->StartPhyNum + i;
6570 		reason_code = event_data->PHY[i].PhyStatus &
6571 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
6572 		switch (reason_code) {
6573 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6574 			status_str = "target add";
6575 			break;
6576 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6577 			status_str = "target remove";
6578 			break;
6579 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6580 			status_str = "delay target remove";
6581 			break;
6582 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6583 			status_str = "link rate change";
6584 			break;
6585 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6586 			status_str = "target responding";
6587 			break;
6588 		default:
6589 			status_str = "unknown";
6590 			break;
6591 		}
6592 		link_rate = event_data->PHY[i].LinkRate >> 4;
6593 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6594 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6595 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6596 		    handle, status_str, link_rate, prev_link_rate);
6597 
6598 	}
6599 }
6600 
6601 /**
6602  * _scsih_sas_topology_change_event - handle topology changes
6603  * @ioc: per adapter object
6604  * @fw_event: The fw_event_work object
6605  * Context: user.
6606  *
6607  */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	Mpi2EventDataSasTopologyChangeList_t *event_data =
		(Mpi2EventDataSasTopologyChangeList_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);

	/* Nothing to do while the controller is resetting or going away. */
	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	/* Make sure the host's own phy information is current first. */
	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	/* Resolve the parent (expander or host adapter) to its SAS address
	 * and phy count; an unknown parent means nothing to process.
	 */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
	} else if (parent_handle < ioc->sas_hba.num_phys) {
		sas_address = ioc->sas_hba.sas_address;
		max_phys = ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* The ignore flag can be set asynchronously (e.g. when the
		 * expander itself is being removed); re-check every pass.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring expander event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		/* Vacant phys carry no device unless one is being removed. */
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
				continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		/* New rate in high nibble, previous rate in low nibble. */
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate);

			/* Link dropped below a usable rate: nothing more. */
			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address, handle,
			    phy_number, link_rate);

			/* Only devices still pending an OS add fall through
			 * to the add path below.
			 */
			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			fallthrough;

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);
			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address);

	return 0;
}
6733 
6734 /**
6735  * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
6737  * @event_data: event data payload
6738  * Context: user.
6739  */
6740 static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasDeviceStatusChange_t * event_data)6741 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6742 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6743 {
6744 	char *reason_str = NULL;
6745 
6746 	switch (event_data->ReasonCode) {
6747 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6748 		reason_str = "smart data";
6749 		break;
6750 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6751 		reason_str = "unsupported device discovered";
6752 		break;
6753 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6754 		reason_str = "internal device reset";
6755 		break;
6756 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6757 		reason_str = "internal task abort";
6758 		break;
6759 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6760 		reason_str = "internal task abort set";
6761 		break;
6762 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6763 		reason_str = "internal clear task set";
6764 		break;
6765 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6766 		reason_str = "internal query task";
6767 		break;
6768 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6769 		reason_str = "sata init failure";
6770 		break;
6771 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6772 		reason_str = "internal device reset complete";
6773 		break;
6774 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6775 		reason_str = "internal task abort complete";
6776 		break;
6777 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6778 		reason_str = "internal async notification";
6779 		break;
6780 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6781 		reason_str = "expander reduced functionality";
6782 		break;
6783 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6784 		reason_str = "expander reduced functionality complete";
6785 		break;
6786 	default:
6787 		reason_str = "unknown reason";
6788 		break;
6789 	}
6790 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6791 		 reason_str, le16_to_cpu(event_data->DevHandle),
6792 		 (u64)le64_to_cpu(event_data->SASAddress),
6793 		 le16_to_cpu(event_data->TaskTag));
6794 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
6795 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
6796 			event_data->ASC, event_data->ASCQ);
6797 	pr_cont("\n");
6798 }
6799 
6800 /**
6801  * _scsih_sas_device_status_change_event - handle device status change
6802  * @ioc: per adapter object
6803  * @event_data: The fw event
6804  * Context: user.
6805  */
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _sas_device *sas_device;
	u64 sas_address;
	unsigned long flags;

	/* In MPI Revision K (0xC), the internal device reset complete was
	 * implemented, so avoid setting tm_busy flag for older firmware.
	 */
	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
		return;

	/* Only the internal-device-reset start/complete pair matters here;
	 * every other reason code is ignored.
	 */
	if (event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	   event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(event_data->SASAddress);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address);

	if (!sas_device || !sas_device->starget)
		goto out;

	target_priv_data = sas_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* tm_busy is set while an internal device reset is in flight and
	 * cleared again when the reset-complete event arrives.
	 */
	if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		ioc_info(ioc,
		    "%s tm_busy flag for handle(0x%04x)\n",
		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
		    target_priv_data->handle);

out:
	/* Drop the lookup reference before releasing the device lock. */
	if (sas_device)
		sas_device_put(sas_device);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
6857 
6858 
6859 /**
6860  * _scsih_check_pcie_access_status - check access flags
6861  * @ioc: per adapter object
6862  * @wwid: wwid
6863  * @handle: sas device handle
6864  * @access_status: errors returned during discovery of the device
6865  *
6866  * Return: 0 for success, else failure
6867  */
6868 static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER * ioc,u64 wwid,u16 handle,u8 access_status)6869 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6870 	u16 handle, u8 access_status)
6871 {
6872 	u8 rc = 1;
6873 	char *desc = NULL;
6874 
6875 	switch (access_status) {
6876 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6877 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6878 		rc = 0;
6879 		break;
6880 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6881 		desc = "PCIe device capability failed";
6882 		break;
6883 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6884 		desc = "PCIe device blocked";
6885 		ioc_info(ioc,
6886 		    "Device with Access Status (%s): wwid(0x%016llx), "
6887 		    "handle(0x%04x)\n ll only be added to the internal list",
6888 		    desc, (u64)wwid, handle);
6889 		rc = 0;
6890 		break;
6891 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6892 		desc = "PCIe device mem space access failed";
6893 		break;
6894 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6895 		desc = "PCIe device unsupported";
6896 		break;
6897 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6898 		desc = "PCIe device MSIx Required";
6899 		break;
6900 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6901 		desc = "PCIe device init fail max";
6902 		break;
6903 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6904 		desc = "PCIe device status unknown";
6905 		break;
6906 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6907 		desc = "nvme ready timeout";
6908 		break;
6909 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6910 		desc = "nvme device configuration unsupported";
6911 		break;
6912 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6913 		desc = "nvme identify failed";
6914 		break;
6915 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6916 		desc = "nvme qconfig failed";
6917 		break;
6918 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6919 		desc = "nvme qcreation failed";
6920 		break;
6921 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6922 		desc = "nvme eventcfg failed";
6923 		break;
6924 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6925 		desc = "nvme get feature stat failed";
6926 		break;
6927 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6928 		desc = "nvme idle timeout";
6929 		break;
6930 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6931 		desc = "nvme failure status";
6932 		break;
6933 	default:
6934 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
6935 			access_status, (u64)wwid, handle);
6936 		return rc;
6937 	}
6938 
6939 	if (!rc)
6940 		return rc;
6941 
6942 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6943 		 desc, (u64)wwid, handle);
6944 	return rc;
6945 }
6946 
6947 /**
6948  * _scsih_pcie_device_remove_from_sml -  removing pcie device
6949  * from SML and free up associated memory
6950  * @ioc: per adapter object
6951  * @pcie_device: the pcie_device object
6952  */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Mark the target deleted and unblock queued I/O so it can be
	 * failed back to the midlayer, then invalidate the cached handle.
	 */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* DEVICE_BLOCKED devices are skipped — presumably these were only
	 * ever on the internal list (see _scsih_check_pcie_access_status)
	 * and never exposed to the midlayer; confirm.
	 */
	if (pcie_device->starget && (pcie_device->access_status !=
				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Release the heap-allocated serial number string. */
	kfree(pcie_device->serial_number);
}
7016 
7017 
7018 /**
7019  * _scsih_pcie_check_device - checking device responsiveness
7020  * @ioc: per adapter object
7021  * @handle: attached device handle
7022  */
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	u32 ioc_status;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;

	/* Read PCIe Device Page 0; bail out silently on any config-page
	 * failure since this is only a responsiveness check.
	 */
	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
		return;

	/* Look the device up by WWID under the pcie-device-list lock. */
	wwid = le64_to_cpu(pcie_device_pg0.WWID);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return;
	}

	/* Firmware may have re-assigned the device handle; refresh the
	 * cached handle and enclosure data.
	 */
	if (unlikely(pcie_device->handle != handle)) {
		starget = pcie_device->starget;
		sas_target_priv_data = starget->hostdata;
		pcie_device->access_status = pcie_device_pg0.AccessStatus;
		starget_printk(KERN_INFO, starget,
		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
		    pcie_device->handle, handle);
		sas_target_priv_data->handle = handle;
		pcie_device->handle = handle;

		if (le32_to_cpu(pcie_device_pg0.Flags) &
		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
			pcie_device->enclosure_level =
			    pcie_device_pg0.EnclosureLevel;
			memcpy(&pcie_device->connector_name[0],
			    &pcie_device_pg0.ConnectorName[0], 4);
		} else {
			pcie_device->enclosure_level = 0;
			pcie_device->connector_name[0] = '\0';
		}
	}

	/* check if device is present */
	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
			 handle);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
	    pcie_device_pg0.AccessStatus)) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		pcie_device_put(pcie_device);
		return;
	}

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	pcie_device_put(pcie_device);

	/* Device responded and looks healthy: resume any blocked I/O. */
	_scsih_ublock_io_device(ioc, wwid);

	return;
}
7105 
7106 /**
7107  * _scsih_pcie_add_device -  creating pcie device object
7108  * @ioc: per adapter object
7109  * @handle: pcie device handle
7110  *
7111  * Creating end device object, stored in ioc->pcie_device_list.
7112  *
7113  * Return: 1 means queue the event later, 0 means complete the event
7114  */
7115 static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)7116 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7117 {
7118 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
7119 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
7120 	Mpi2ConfigReply_t mpi_reply;
7121 	struct _pcie_device *pcie_device;
7122 	struct _enclosure_node *enclosure_dev;
7123 	u32 ioc_status;
7124 	u64 wwid;
7125 
7126 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7127 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
7128 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7129 			__FILE__, __LINE__, __func__);
7130 		return 0;
7131 	}
7132 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7133 	    MPI2_IOCSTATUS_MASK;
7134 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7135 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7136 			__FILE__, __LINE__, __func__);
7137 		return 0;
7138 	}
7139 
7140 	set_bit(handle, ioc->pend_os_device_add);
7141 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
7142 
7143 	/* check if device is present */
7144 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7145 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7146 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7147 			handle);
7148 		return 0;
7149 	}
7150 
7151 	/* check if there were any issues with discovery */
7152 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7153 	    pcie_device_pg0.AccessStatus))
7154 		return 0;
7155 
7156 	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
7157 	    (pcie_device_pg0.DeviceInfo))))
7158 		return 0;
7159 
7160 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
7161 	if (pcie_device) {
7162 		clear_bit(handle, ioc->pend_os_device_add);
7163 		pcie_device_put(pcie_device);
7164 		return 0;
7165 	}
7166 
7167 	/* PCIe Device Page 2 contains read-only information about a
7168 	 * specific NVMe device; therefore, this page is only
7169 	 * valid for NVMe devices and skip for pcie devices of type scsi.
7170 	 */
7171 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
7172 		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
7173 		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
7174 		    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
7175 		    handle)) {
7176 			ioc_err(ioc,
7177 			    "failure at %s:%d/%s()!\n", __FILE__,
7178 			    __LINE__, __func__);
7179 			return 0;
7180 		}
7181 
7182 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7183 					MPI2_IOCSTATUS_MASK;
7184 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7185 			ioc_err(ioc,
7186 			    "failure at %s:%d/%s()!\n", __FILE__,
7187 			    __LINE__, __func__);
7188 			return 0;
7189 		}
7190 	}
7191 
7192 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
7193 	if (!pcie_device) {
7194 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7195 			__FILE__, __LINE__, __func__);
7196 		return 0;
7197 	}
7198 
7199 	kref_init(&pcie_device->refcount);
7200 	pcie_device->id = ioc->pcie_target_id++;
7201 	pcie_device->channel = PCIE_CHANNEL;
7202 	pcie_device->handle = handle;
7203 	pcie_device->access_status = pcie_device_pg0.AccessStatus;
7204 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7205 	pcie_device->wwid = wwid;
7206 	pcie_device->port_num = pcie_device_pg0.PortNum;
7207 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
7208 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7209 
7210 	pcie_device->enclosure_handle =
7211 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
7212 	if (pcie_device->enclosure_handle != 0)
7213 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
7214 
7215 	if (le32_to_cpu(pcie_device_pg0.Flags) &
7216 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7217 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
7218 		memcpy(&pcie_device->connector_name[0],
7219 		    &pcie_device_pg0.ConnectorName[0], 4);
7220 	} else {
7221 		pcie_device->enclosure_level = 0;
7222 		pcie_device->connector_name[0] = '\0';
7223 	}
7224 
7225 	/* get enclosure_logical_id */
7226 	if (pcie_device->enclosure_handle) {
7227 		enclosure_dev =
7228 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7229 						pcie_device->enclosure_handle);
7230 		if (enclosure_dev)
7231 			pcie_device->enclosure_logical_id =
7232 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7233 	}
7234 	/* TODO -- Add device name once FW supports it */
7235 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
7236 	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
7237 		pcie_device->nvme_mdts =
7238 		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
7239 		pcie_device->shutdown_latency =
7240 			le16_to_cpu(pcie_device_pg2.ShutdownLatency);
7241 		/*
7242 		 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
7243 		 * if drive's RTD3 Entry Latency is greater then IOC's
7244 		 * max_shutdown_latency.
7245 		 */
7246 		if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
7247 			ioc->max_shutdown_latency =
7248 				pcie_device->shutdown_latency;
7249 		if (pcie_device_pg2.ControllerResetTO)
7250 			pcie_device->reset_timeout =
7251 			    pcie_device_pg2.ControllerResetTO;
7252 		else
7253 			pcie_device->reset_timeout = 30;
7254 	} else
7255 		pcie_device->reset_timeout = 30;
7256 
7257 	if (ioc->wait_for_discovery_to_complete)
7258 		_scsih_pcie_device_init_add(ioc, pcie_device);
7259 	else
7260 		_scsih_pcie_device_add(ioc, pcie_device);
7261 
7262 	pcie_device_put(pcie_device);
7263 	return 0;
7264 }
7265 
7266 /**
7267  * _scsih_pcie_topology_change_event_debug - debug for topology
7268  * event
7269  * @ioc: per adapter object
7270  * @event_data: event data payload
7271  * Context: user.
7272  */
7273 static void
_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)7274 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7275 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
7276 {
7277 	int i;
7278 	u16 handle;
7279 	u16 reason_code;
7280 	u8 port_number;
7281 	char *status_str = NULL;
7282 	u8 link_rate, prev_link_rate;
7283 
7284 	switch (event_data->SwitchStatus) {
7285 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
7286 		status_str = "add";
7287 		break;
7288 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
7289 		status_str = "remove";
7290 		break;
7291 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
7292 	case 0:
7293 		status_str =  "responding";
7294 		break;
7295 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
7296 		status_str = "remove delay";
7297 		break;
7298 	default:
7299 		status_str = "unknown status";
7300 		break;
7301 	}
7302 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
7303 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
7304 		"start_port(%02d), count(%d)\n",
7305 		le16_to_cpu(event_data->SwitchDevHandle),
7306 		le16_to_cpu(event_data->EnclosureHandle),
7307 		event_data->StartPortNum, event_data->NumEntries);
7308 	for (i = 0; i < event_data->NumEntries; i++) {
7309 		handle =
7310 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7311 		if (!handle)
7312 			continue;
7313 		port_number = event_data->StartPortNum + i;
7314 		reason_code = event_data->PortEntry[i].PortStatus;
7315 		switch (reason_code) {
7316 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7317 			status_str = "target add";
7318 			break;
7319 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7320 			status_str = "target remove";
7321 			break;
7322 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
7323 			status_str = "delay target remove";
7324 			break;
7325 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7326 			status_str = "link rate change";
7327 			break;
7328 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
7329 			status_str = "target responding";
7330 			break;
7331 		default:
7332 			status_str = "unknown";
7333 			break;
7334 		}
7335 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
7336 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7337 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
7338 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7339 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
7340 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
7341 			handle, status_str, link_rate, prev_link_rate);
7342 	}
7343 }
7344 
/**
 * _scsih_pcie_topology_change_event - handle PCIe topology
 *  changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Walks every port entry of the topology change list and performs the
 * matching device add/remove/check action.  Note that this handler may
 * rewrite PortStatus fields inside @fw_event's event data (to convert a
 * link-rate change into a device add, or to mark an entry handled).
 */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* Drop the event entirely while the host is resetting/going away. */
	if (ioc->shost_recovery || ioc->remove_host ||
		ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* ignore/remove flags can be set asynchronously by a
		 * superseding event; re-check on every iteration.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime.  Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			/* Rewrite this entry's status to DEV_ADDED and fall
			 * through so the device is (re)added below.
			 */
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}
7458 
7459 /**
7460  * _scsih_pcie_device_status_change_event_debug - debug for device event
7461  * @ioc: ?
7462  * @event_data: event data payload
7463  * Context: user.
7464  */
7465 static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeDeviceStatusChange_t * event_data)7466 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7467 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7468 {
7469 	char *reason_str = NULL;
7470 
7471 	switch (event_data->ReasonCode) {
7472 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7473 		reason_str = "smart data";
7474 		break;
7475 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7476 		reason_str = "unsupported device discovered";
7477 		break;
7478 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7479 		reason_str = "internal device reset";
7480 		break;
7481 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7482 		reason_str = "internal task abort";
7483 		break;
7484 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7485 		reason_str = "internal task abort set";
7486 		break;
7487 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7488 		reason_str = "internal clear task set";
7489 		break;
7490 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7491 		reason_str = "internal query task";
7492 		break;
7493 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7494 		reason_str = "device init failure";
7495 		break;
7496 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7497 		reason_str = "internal device reset complete";
7498 		break;
7499 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7500 		reason_str = "internal task abort complete";
7501 		break;
7502 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7503 		reason_str = "internal async notification";
7504 		break;
7505 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7506 		reason_str = "pcie hot reset failed";
7507 		break;
7508 	default:
7509 		reason_str = "unknown reason";
7510 		break;
7511 	}
7512 
7513 	ioc_info(ioc, "PCIE device status change: (%s)\n"
7514 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7515 		 reason_str, le16_to_cpu(event_data->DevHandle),
7516 		 (u64)le64_to_cpu(event_data->WWID),
7517 		 le16_to_cpu(event_data->TaskTag));
7518 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7519 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7520 			event_data->ASC, event_data->ASCQ);
7521 	pr_cont("\n");
7522 }
7523 
7524 /**
7525  * _scsih_pcie_device_status_change_event - handle device status
7526  * change
7527  * @ioc: per adapter object
7528  * @fw_event: The fw_event_work object
7529  * Context: user.
7530  */
7531 static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)7532 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7533 	struct fw_event_work *fw_event)
7534 {
7535 	struct MPT3SAS_TARGET *target_priv_data;
7536 	struct _pcie_device *pcie_device;
7537 	u64 wwid;
7538 	unsigned long flags;
7539 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7540 		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7541 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7542 		_scsih_pcie_device_status_change_event_debug(ioc,
7543 			event_data);
7544 
7545 	if (event_data->ReasonCode !=
7546 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7547 		event_data->ReasonCode !=
7548 		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7549 		return;
7550 
7551 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7552 	wwid = le64_to_cpu(event_data->WWID);
7553 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7554 
7555 	if (!pcie_device || !pcie_device->starget)
7556 		goto out;
7557 
7558 	target_priv_data = pcie_device->starget->hostdata;
7559 	if (!target_priv_data)
7560 		goto out;
7561 
7562 	if (event_data->ReasonCode ==
7563 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7564 		target_priv_data->tm_busy = 1;
7565 	else
7566 		target_priv_data->tm_busy = 0;
7567 out:
7568 	if (pcie_device)
7569 		pcie_device_put(pcie_device);
7570 
7571 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7572 }
7573 
7574 /**
7575  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7576  * event
7577  * @ioc: per adapter object
7578  * @event_data: event data payload
7579  * Context: user.
7580  */
7581 static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasEnclDevStatusChange_t * event_data)7582 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7583 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7584 {
7585 	char *reason_str = NULL;
7586 
7587 	switch (event_data->ReasonCode) {
7588 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7589 		reason_str = "enclosure add";
7590 		break;
7591 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7592 		reason_str = "enclosure remove";
7593 		break;
7594 	default:
7595 		reason_str = "unknown reason";
7596 		break;
7597 	}
7598 
7599 	ioc_info(ioc, "enclosure status change: (%s)\n"
7600 		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
7601 		 reason_str,
7602 		 le16_to_cpu(event_data->EnclosureHandle),
7603 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
7604 		 le16_to_cpu(event_data->StartSlot));
7605 }
7606 
7607 /**
7608  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7609  * @ioc: per adapter object
7610  * @fw_event: The fw_event_work object
7611  * Context: user.
7612  */
7613 static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)7614 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7615 	struct fw_event_work *fw_event)
7616 {
7617 	Mpi2ConfigReply_t mpi_reply;
7618 	struct _enclosure_node *enclosure_dev = NULL;
7619 	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7620 		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7621 	int rc;
7622 	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7623 
7624 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7625 		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7626 		     (Mpi2EventDataSasEnclDevStatusChange_t *)
7627 		     fw_event->event_data);
7628 	if (ioc->shost_recovery)
7629 		return;
7630 
7631 	if (enclosure_handle)
7632 		enclosure_dev =
7633 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7634 						enclosure_handle);
7635 	switch (event_data->ReasonCode) {
7636 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7637 		if (!enclosure_dev) {
7638 			enclosure_dev =
7639 				kzalloc(sizeof(struct _enclosure_node),
7640 					GFP_KERNEL);
7641 			if (!enclosure_dev) {
7642 				ioc_info(ioc, "failure at %s:%d/%s()!\n",
7643 					 __FILE__, __LINE__, __func__);
7644 				return;
7645 			}
7646 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7647 				&enclosure_dev->pg0,
7648 				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7649 				enclosure_handle);
7650 
7651 			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7652 						MPI2_IOCSTATUS_MASK)) {
7653 				kfree(enclosure_dev);
7654 				return;
7655 			}
7656 
7657 			list_add_tail(&enclosure_dev->list,
7658 							&ioc->enclosure_list);
7659 		}
7660 		break;
7661 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7662 		if (enclosure_dev) {
7663 			list_del(&enclosure_dev->list);
7664 			kfree(enclosure_dev);
7665 		}
7666 		break;
7667 	default:
7668 		break;
7669 	}
7670 }
7671 
/**
 * _scsih_sas_broadcast_primitive_event - handle broadcast events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Walks every outstanding SCSI IO, issues QUERY_TASK to see whether the
 * IOC/target still owns it, and aborts the ones that are orphaned.  The
 * scsi_lookup_lock is dropped around each TM (which sleeps) and
 * re-taken; the whole scan restarts (up to 5 times) on TM failure or if
 * another broadcast AEN arrived meanwhile.
 */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	/* serialize against other users of ioc->tm_cmds */
	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		 /* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		 /* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		 /* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* TM sleeps; must drop the spinlock around it */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
				ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		/* orphaned IO: abort it, retrying up to 60 times */
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* another broadcast AEN arrived while scanning: scan again */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		 ioc->broadcast_aen_pending = 0;
		 goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}
7836 
7837 /**
7838  * _scsih_sas_discovery_event - handle discovery events
7839  * @ioc: per adapter object
7840  * @fw_event: The fw_event_work object
7841  * Context: user.
7842  */
7843 static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)7844 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7845 	struct fw_event_work *fw_event)
7846 {
7847 	Mpi2EventDataSasDiscovery_t *event_data =
7848 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7849 
7850 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7851 		ioc_info(ioc, "discovery event: (%s)",
7852 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
7853 			 "start" : "stop");
7854 		if (event_data->DiscoveryStatus)
7855 			pr_cont("discovery_status(0x%08x)",
7856 				le32_to_cpu(event_data->DiscoveryStatus));
7857 		pr_cont("\n");
7858 	}
7859 
7860 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7861 	    !ioc->sas_hba.num_phys) {
7862 		if (disable_discovery > 0 && ioc->shost_recovery) {
7863 			/* Wait for the reset to complete */
7864 			while (ioc->shost_recovery)
7865 				ssleep(1);
7866 		}
7867 		_scsih_sas_host_add(ioc);
7868 	}
7869 }
7870 
7871 /**
7872  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7873  *						events
7874  * @ioc: per adapter object
7875  * @fw_event: The fw_event_work object
7876  * Context: user.
7877  */
7878 static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)7879 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7880 	struct fw_event_work *fw_event)
7881 {
7882 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7883 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7884 
7885 	switch (event_data->ReasonCode) {
7886 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7887 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
7888 			 le16_to_cpu(event_data->DevHandle),
7889 			 (u64)le64_to_cpu(event_data->SASAddress),
7890 			 event_data->PhysicalPort);
7891 		break;
7892 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7893 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
7894 			 le16_to_cpu(event_data->DevHandle),
7895 			 (u64)le64_to_cpu(event_data->SASAddress),
7896 			 event_data->PhysicalPort);
7897 		break;
7898 	default:
7899 		break;
7900 	}
7901 }
7902 
7903 /**
7904  * _scsih_pcie_enumeration_event - handle enumeration events
7905  * @ioc: per adapter object
7906  * @fw_event: The fw_event_work object
7907  * Context: user.
7908  */
7909 static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)7910 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7911 	struct fw_event_work *fw_event)
7912 {
7913 	Mpi26EventDataPCIeEnumeration_t *event_data =
7914 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7915 
7916 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7917 		return;
7918 
7919 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
7920 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7921 		 "started" : "completed",
7922 		 event_data->Flags);
7923 	if (event_data->EnumerationStatus)
7924 		pr_cont("enumeration_status(0x%08x)",
7925 			le32_to_cpu(event_data->EnumerationStatus));
7926 	pr_cont("\n");
7927 }
7928 
/**
 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
 * @ioc: per adapter object
 * @handle: device handle for physical disk
 * @phys_disk_num: physical disk number
 *
 * Sends a RAID_ACTION (PHYSDISK_HIDDEN) firmware request through the
 * single-slot scsih_cmds command channel and waits up to 10s for the
 * reply.  No-op on MPI2 (SAS2) generation controllers.
 *
 * Return: 0 for success, else failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* fast path is not applicable to SAS2 controllers */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	/* NOTE(review): mpt3sas_check_cmd_timeout presumably updates
	 * issue_reset on timeout (macro semantics) — confirm against its
	 * definition; the plain variable read below relies on that.
	 */
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		/* capture log info before masking off the status bits */
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	/* hard reset outside the mutex if the command timed out */
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}
8019 
8020 /**
8021  * _scsih_reprobe_lun - reprobing lun
8022  * @sdev: scsi device struct
8023  * @no_uld_attach: sdev->no_uld_attach flag setting
8024  *
8025  **/
8026 static void
_scsih_reprobe_lun(struct scsi_device * sdev,void * no_uld_attach)8027 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8028 {
8029 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8030 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8031 	    sdev->no_uld_attach ? "hiding" : "exposing");
8032 	WARN_ON(scsi_device_reprobe(sdev));
8033 }
8034 
8035 /**
8036  * _scsih_sas_volume_add - add new volume
8037  * @ioc: per adapter object
8038  * @element: IR config element data
8039  * Context: user.
8040  */
8041 static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)8042 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
8043 	Mpi2EventIrConfigElement_t *element)
8044 {
8045 	struct _raid_device *raid_device;
8046 	unsigned long flags;
8047 	u64 wwid;
8048 	u16 handle = le16_to_cpu(element->VolDevHandle);
8049 	int rc;
8050 
8051 	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8052 	if (!wwid) {
8053 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8054 			__FILE__, __LINE__, __func__);
8055 		return;
8056 	}
8057 
8058 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8059 	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
8060 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8061 
8062 	if (raid_device)
8063 		return;
8064 
8065 	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8066 	if (!raid_device) {
8067 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8068 			__FILE__, __LINE__, __func__);
8069 		return;
8070 	}
8071 
8072 	raid_device->id = ioc->sas_id++;
8073 	raid_device->channel = RAID_CHANNEL;
8074 	raid_device->handle = handle;
8075 	raid_device->wwid = wwid;
8076 	_scsih_raid_device_add(ioc, raid_device);
8077 	if (!ioc->wait_for_discovery_to_complete) {
8078 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8079 		    raid_device->id, 0);
8080 		if (rc)
8081 			_scsih_raid_device_remove(ioc, raid_device);
8082 	} else {
8083 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8084 		_scsih_determine_boot_device(ioc, raid_device, 1);
8085 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8086 	}
8087 }
8088 
8089 /**
8090  * _scsih_sas_volume_delete - delete volume
8091  * @ioc: per adapter object
8092  * @handle: volume device handle
8093  * Context: user.
8094  */
8095 static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER * ioc,u16 handle)8096 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8097 {
8098 	struct _raid_device *raid_device;
8099 	unsigned long flags;
8100 	struct MPT3SAS_TARGET *sas_target_priv_data;
8101 	struct scsi_target *starget = NULL;
8102 
8103 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8104 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8105 	if (raid_device) {
8106 		if (raid_device->starget) {
8107 			starget = raid_device->starget;
8108 			sas_target_priv_data = starget->hostdata;
8109 			sas_target_priv_data->deleted = 1;
8110 		}
8111 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
8112 			 raid_device->handle, (u64)raid_device->wwid);
8113 		list_del(&raid_device->list);
8114 		kfree(raid_device);
8115 	}
8116 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8117 	if (starget)
8118 		scsi_remove_target(&starget->dev);
8119 }
8120 
8121 /**
8122  * _scsih_sas_pd_expose - expose pd component to /dev/sdX
8123  * @ioc: per adapter object
8124  * @element: IR config element data
8125  * Context: user.
8126  */
8127 static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)8128 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
8129 	Mpi2EventIrConfigElement_t *element)
8130 {
8131 	struct _sas_device *sas_device;
8132 	struct scsi_target *starget = NULL;
8133 	struct MPT3SAS_TARGET *sas_target_priv_data;
8134 	unsigned long flags;
8135 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8136 
8137 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8138 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
8139 	if (sas_device) {
8140 		sas_device->volume_handle = 0;
8141 		sas_device->volume_wwid = 0;
8142 		clear_bit(handle, ioc->pd_handles);
8143 		if (sas_device->starget && sas_device->starget->hostdata) {
8144 			starget = sas_device->starget;
8145 			sas_target_priv_data = starget->hostdata;
8146 			sas_target_priv_data->flags &=
8147 			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
8148 		}
8149 	}
8150 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8151 	if (!sas_device)
8152 		return;
8153 
8154 	/* exposing raid component */
8155 	if (starget)
8156 		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
8157 
8158 	sas_device_put(sas_device);
8159 }
8160 
8161 /**
8162  * _scsih_sas_pd_hide - hide pd component from /dev/sdX
8163  * @ioc: per adapter object
8164  * @element: IR config element data
8165  * Context: user.
8166  */
8167 static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)8168 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
8169 	Mpi2EventIrConfigElement_t *element)
8170 {
8171 	struct _sas_device *sas_device;
8172 	struct scsi_target *starget = NULL;
8173 	struct MPT3SAS_TARGET *sas_target_priv_data;
8174 	unsigned long flags;
8175 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8176 	u16 volume_handle = 0;
8177 	u64 volume_wwid = 0;
8178 
8179 	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
8180 	if (volume_handle)
8181 		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
8182 		    &volume_wwid);
8183 
8184 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8185 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
8186 	if (sas_device) {
8187 		set_bit(handle, ioc->pd_handles);
8188 		if (sas_device->starget && sas_device->starget->hostdata) {
8189 			starget = sas_device->starget;
8190 			sas_target_priv_data = starget->hostdata;
8191 			sas_target_priv_data->flags |=
8192 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
8193 			sas_device->volume_handle = volume_handle;
8194 			sas_device->volume_wwid = volume_wwid;
8195 		}
8196 	}
8197 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8198 	if (!sas_device)
8199 		return;
8200 
8201 	/* hiding raid component */
8202 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8203 
8204 	if (starget)
8205 		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
8206 
8207 	sas_device_put(sas_device);
8208 }
8209 
8210 /**
8211  * _scsih_sas_pd_delete - delete pd component
8212  * @ioc: per adapter object
8213  * @element: IR config element data
8214  * Context: user.
8215  */
8216 static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)8217 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
8218 	Mpi2EventIrConfigElement_t *element)
8219 {
8220 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8221 
8222 	_scsih_device_remove_by_handle(ioc, handle);
8223 }
8224 
8225 /**
8226  * _scsih_sas_pd_add - remove pd component
8227  * @ioc: per adapter object
8228  * @element: IR config element data
8229  * Context: user.
8230  */
8231 static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)8232 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
8233 	Mpi2EventIrConfigElement_t *element)
8234 {
8235 	struct _sas_device *sas_device;
8236 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8237 	Mpi2ConfigReply_t mpi_reply;
8238 	Mpi2SasDevicePage0_t sas_device_pg0;
8239 	u32 ioc_status;
8240 	u64 sas_address;
8241 	u16 parent_handle;
8242 
8243 	set_bit(handle, ioc->pd_handles);
8244 
8245 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8246 	if (sas_device) {
8247 		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8248 		sas_device_put(sas_device);
8249 		return;
8250 	}
8251 
8252 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
8253 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
8254 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8255 			__FILE__, __LINE__, __func__);
8256 		return;
8257 	}
8258 
8259 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8260 	    MPI2_IOCSTATUS_MASK;
8261 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8262 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8263 			__FILE__, __LINE__, __func__);
8264 		return;
8265 	}
8266 
8267 	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8268 	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8269 		mpt3sas_transport_update_links(ioc, sas_address, handle,
8270 		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8271 
8272 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8273 	_scsih_add_device(ioc, handle, 0, 1);
8274 }
8275 
8276 /**
8277  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
8278  * @ioc: per adapter object
8279  * @event_data: event data payload
8280  * Context: user.
8281  */
8282 static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrConfigChangeList_t * event_data)8283 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8284 	Mpi2EventDataIrConfigChangeList_t *event_data)
8285 {
8286 	Mpi2EventIrConfigElement_t *element;
8287 	u8 element_type;
8288 	int i;
8289 	char *reason_str = NULL, *element_str = NULL;
8290 
8291 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8292 
8293 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
8294 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
8295 		 "foreign" : "native",
8296 		 event_data->NumElements);
8297 	for (i = 0; i < event_data->NumElements; i++, element++) {
8298 		switch (element->ReasonCode) {
8299 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8300 			reason_str = "add";
8301 			break;
8302 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8303 			reason_str = "remove";
8304 			break;
8305 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
8306 			reason_str = "no change";
8307 			break;
8308 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8309 			reason_str = "hide";
8310 			break;
8311 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8312 			reason_str = "unhide";
8313 			break;
8314 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8315 			reason_str = "volume_created";
8316 			break;
8317 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8318 			reason_str = "volume_deleted";
8319 			break;
8320 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8321 			reason_str = "pd_created";
8322 			break;
8323 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8324 			reason_str = "pd_deleted";
8325 			break;
8326 		default:
8327 			reason_str = "unknown reason";
8328 			break;
8329 		}
8330 		element_type = le16_to_cpu(element->ElementFlags) &
8331 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
8332 		switch (element_type) {
8333 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
8334 			element_str = "volume";
8335 			break;
8336 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
8337 			element_str = "phys disk";
8338 			break;
8339 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
8340 			element_str = "hot spare";
8341 			break;
8342 		default:
8343 			element_str = "unknown element";
8344 			break;
8345 		}
8346 		pr_info("\t(%s:%s), vol handle(0x%04x), " \
8347 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
8348 		    reason_str, le16_to_cpu(element->VolDevHandle),
8349 		    le16_to_cpu(element->PhysDiskDevHandle),
8350 		    element->PhysDiskNum);
8351 	}
8352 }
8353 
8354 /**
8355  * _scsih_sas_ir_config_change_event - handle ir configuration change events
8356  * @ioc: per adapter object
8357  * @fw_event: The fw_event_work object
8358  * Context: user.
8359  */
8360 static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8361 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
8362 	struct fw_event_work *fw_event)
8363 {
8364 	Mpi2EventIrConfigElement_t *element;
8365 	int i;
8366 	u8 foreign_config;
8367 	Mpi2EventDataIrConfigChangeList_t *event_data =
8368 		(Mpi2EventDataIrConfigChangeList_t *)
8369 		fw_event->event_data;
8370 
8371 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8372 	     (!ioc->hide_ir_msg))
8373 		_scsih_sas_ir_config_change_event_debug(ioc, event_data);
8374 
8375 	foreign_config = (le32_to_cpu(event_data->Flags) &
8376 	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
8377 
8378 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8379 	if (ioc->shost_recovery &&
8380 	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8381 		for (i = 0; i < event_data->NumElements; i++, element++) {
8382 			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
8383 				_scsih_ir_fastpath(ioc,
8384 					le16_to_cpu(element->PhysDiskDevHandle),
8385 					element->PhysDiskNum);
8386 		}
8387 		return;
8388 	}
8389 
8390 	for (i = 0; i < event_data->NumElements; i++, element++) {
8391 
8392 		switch (element->ReasonCode) {
8393 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8394 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8395 			if (!foreign_config)
8396 				_scsih_sas_volume_add(ioc, element);
8397 			break;
8398 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8399 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8400 			if (!foreign_config)
8401 				_scsih_sas_volume_delete(ioc,
8402 				    le16_to_cpu(element->VolDevHandle));
8403 			break;
8404 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8405 			if (!ioc->is_warpdrive)
8406 				_scsih_sas_pd_hide(ioc, element);
8407 			break;
8408 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8409 			if (!ioc->is_warpdrive)
8410 				_scsih_sas_pd_expose(ioc, element);
8411 			break;
8412 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8413 			if (!ioc->is_warpdrive)
8414 				_scsih_sas_pd_add(ioc, element);
8415 			break;
8416 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8417 			if (!ioc->is_warpdrive)
8418 				_scsih_sas_pd_delete(ioc, element);
8419 			break;
8420 		}
8421 	}
8422 }
8423 
8424 /**
8425  * _scsih_sas_ir_volume_event - IR volume event
8426  * @ioc: per adapter object
8427  * @fw_event: The fw_event_work object
8428  * Context: user.
8429  */
8430 static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8431 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8432 	struct fw_event_work *fw_event)
8433 {
8434 	u64 wwid;
8435 	unsigned long flags;
8436 	struct _raid_device *raid_device;
8437 	u16 handle;
8438 	u32 state;
8439 	int rc;
8440 	Mpi2EventDataIrVolume_t *event_data =
8441 		(Mpi2EventDataIrVolume_t *) fw_event->event_data;
8442 
8443 	if (ioc->shost_recovery)
8444 		return;
8445 
8446 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8447 		return;
8448 
8449 	handle = le16_to_cpu(event_data->VolDevHandle);
8450 	state = le32_to_cpu(event_data->NewValue);
8451 	if (!ioc->hide_ir_msg)
8452 		dewtprintk(ioc,
8453 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8454 				    __func__, handle,
8455 				    le32_to_cpu(event_data->PreviousValue),
8456 				    state));
8457 	switch (state) {
8458 	case MPI2_RAID_VOL_STATE_MISSING:
8459 	case MPI2_RAID_VOL_STATE_FAILED:
8460 		_scsih_sas_volume_delete(ioc, handle);
8461 		break;
8462 
8463 	case MPI2_RAID_VOL_STATE_ONLINE:
8464 	case MPI2_RAID_VOL_STATE_DEGRADED:
8465 	case MPI2_RAID_VOL_STATE_OPTIMAL:
8466 
8467 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8468 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8469 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8470 
8471 		if (raid_device)
8472 			break;
8473 
8474 		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8475 		if (!wwid) {
8476 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8477 				__FILE__, __LINE__, __func__);
8478 			break;
8479 		}
8480 
8481 		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8482 		if (!raid_device) {
8483 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8484 				__FILE__, __LINE__, __func__);
8485 			break;
8486 		}
8487 
8488 		raid_device->id = ioc->sas_id++;
8489 		raid_device->channel = RAID_CHANNEL;
8490 		raid_device->handle = handle;
8491 		raid_device->wwid = wwid;
8492 		_scsih_raid_device_add(ioc, raid_device);
8493 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8494 		    raid_device->id, 0);
8495 		if (rc)
8496 			_scsih_raid_device_remove(ioc, raid_device);
8497 		break;
8498 
8499 	case MPI2_RAID_VOL_STATE_INITIALIZING:
8500 	default:
8501 		break;
8502 	}
8503 }
8504 
8505 /**
8506  * _scsih_sas_ir_physical_disk_event - PD event
8507  * @ioc: per adapter object
8508  * @fw_event: The fw_event_work object
8509  * Context: user.
8510  */
8511 static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8512 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8513 	struct fw_event_work *fw_event)
8514 {
8515 	u16 handle, parent_handle;
8516 	u32 state;
8517 	struct _sas_device *sas_device;
8518 	Mpi2ConfigReply_t mpi_reply;
8519 	Mpi2SasDevicePage0_t sas_device_pg0;
8520 	u32 ioc_status;
8521 	Mpi2EventDataIrPhysicalDisk_t *event_data =
8522 		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8523 	u64 sas_address;
8524 
8525 	if (ioc->shost_recovery)
8526 		return;
8527 
8528 	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8529 		return;
8530 
8531 	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8532 	state = le32_to_cpu(event_data->NewValue);
8533 
8534 	if (!ioc->hide_ir_msg)
8535 		dewtprintk(ioc,
8536 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8537 				    __func__, handle,
8538 				    le32_to_cpu(event_data->PreviousValue),
8539 				    state));
8540 
8541 	switch (state) {
8542 	case MPI2_RAID_PD_STATE_ONLINE:
8543 	case MPI2_RAID_PD_STATE_DEGRADED:
8544 	case MPI2_RAID_PD_STATE_REBUILDING:
8545 	case MPI2_RAID_PD_STATE_OPTIMAL:
8546 	case MPI2_RAID_PD_STATE_HOT_SPARE:
8547 
8548 		if (!ioc->is_warpdrive)
8549 			set_bit(handle, ioc->pd_handles);
8550 
8551 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8552 		if (sas_device) {
8553 			sas_device_put(sas_device);
8554 			return;
8555 		}
8556 
8557 		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8558 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8559 		    handle))) {
8560 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8561 				__FILE__, __LINE__, __func__);
8562 			return;
8563 		}
8564 
8565 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8566 		    MPI2_IOCSTATUS_MASK;
8567 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8568 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8569 				__FILE__, __LINE__, __func__);
8570 			return;
8571 		}
8572 
8573 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8574 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8575 			mpt3sas_transport_update_links(ioc, sas_address, handle,
8576 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8577 
8578 		_scsih_add_device(ioc, handle, 0, 1);
8579 
8580 		break;
8581 
8582 	case MPI2_RAID_PD_STATE_OFFLINE:
8583 	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8584 	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8585 	default:
8586 		break;
8587 	}
8588 }
8589 
8590 /**
8591  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8592  * @ioc: per adapter object
8593  * @event_data: event data payload
8594  * Context: user.
8595  */
8596 static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrOperationStatus_t * event_data)8597 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8598 	Mpi2EventDataIrOperationStatus_t *event_data)
8599 {
8600 	char *reason_str = NULL;
8601 
8602 	switch (event_data->RAIDOperation) {
8603 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
8604 		reason_str = "resync";
8605 		break;
8606 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8607 		reason_str = "online capacity expansion";
8608 		break;
8609 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8610 		reason_str = "consistency check";
8611 		break;
8612 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8613 		reason_str = "background init";
8614 		break;
8615 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8616 		reason_str = "make data consistent";
8617 		break;
8618 	}
8619 
8620 	if (!reason_str)
8621 		return;
8622 
8623 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
8624 		 reason_str,
8625 		 le16_to_cpu(event_data->VolDevHandle),
8626 		 event_data->PercentComplete);
8627 }
8628 
8629 /**
8630  * _scsih_sas_ir_operation_status_event - handle RAID operation events
8631  * @ioc: per adapter object
8632  * @fw_event: The fw_event_work object
8633  * Context: user.
8634  */
8635 static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8636 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8637 	struct fw_event_work *fw_event)
8638 {
8639 	Mpi2EventDataIrOperationStatus_t *event_data =
8640 		(Mpi2EventDataIrOperationStatus_t *)
8641 		fw_event->event_data;
8642 	static struct _raid_device *raid_device;
8643 	unsigned long flags;
8644 	u16 handle;
8645 
8646 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8647 	    (!ioc->hide_ir_msg))
8648 		_scsih_sas_ir_operation_status_event_debug(ioc,
8649 		     event_data);
8650 
8651 	/* code added for raid transport support */
8652 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8653 
8654 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8655 		handle = le16_to_cpu(event_data->VolDevHandle);
8656 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8657 		if (raid_device)
8658 			raid_device->percent_complete =
8659 			    event_data->PercentComplete;
8660 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8661 	}
8662 }
8663 
8664 /**
8665  * _scsih_prep_device_scan - initialize parameters prior to device scan
8666  * @ioc: per adapter object
8667  *
8668  * Set the deleted flag prior to device scan.  If the device is found during
8669  * the scan, then we clear the deleted flag.
8670  */
8671 static void
_scsih_prep_device_scan(struct MPT3SAS_ADAPTER * ioc)8672 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8673 {
8674 	struct MPT3SAS_DEVICE *sas_device_priv_data;
8675 	struct scsi_device *sdev;
8676 
8677 	shost_for_each_device(sdev, ioc->shost) {
8678 		sas_device_priv_data = sdev->hostdata;
8679 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
8680 			sas_device_priv_data->sas_target->deleted = 1;
8681 	}
8682 }
8683 
8684 /**
8685  * _scsih_mark_responding_sas_device - mark a sas_devices as responding
8686  * @ioc: per adapter object
8687  * @sas_device_pg0: SAS Device page 0
8688  *
8689  * After host reset, find out whether devices are still responding.
8690  * Used in _scsih_remove_unresponsive_sas_devices.
8691  */
8692 static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER * ioc,Mpi2SasDevicePage0_t * sas_device_pg0)8693 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
8694 Mpi2SasDevicePage0_t *sas_device_pg0)
8695 {
8696 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8697 	struct scsi_target *starget;
8698 	struct _sas_device *sas_device = NULL;
8699 	struct _enclosure_node *enclosure_dev = NULL;
8700 	unsigned long flags;
8701 
8702 	if (sas_device_pg0->EnclosureHandle) {
8703 		enclosure_dev =
8704 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8705 				le16_to_cpu(sas_device_pg0->EnclosureHandle));
8706 		if (enclosure_dev == NULL)
8707 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
8708 				 sas_device_pg0->EnclosureHandle);
8709 	}
8710 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8711 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8712 		if ((sas_device->sas_address == le64_to_cpu(
8713 		    sas_device_pg0->SASAddress)) && (sas_device->slot ==
8714 		    le16_to_cpu(sas_device_pg0->Slot))) {
8715 			sas_device->responding = 1;
8716 			starget = sas_device->starget;
8717 			if (starget && starget->hostdata) {
8718 				sas_target_priv_data = starget->hostdata;
8719 				sas_target_priv_data->tm_busy = 0;
8720 				sas_target_priv_data->deleted = 0;
8721 			} else
8722 				sas_target_priv_data = NULL;
8723 			if (starget) {
8724 				starget_printk(KERN_INFO, starget,
8725 				    "handle(0x%04x), sas_addr(0x%016llx)\n",
8726 				    le16_to_cpu(sas_device_pg0->DevHandle),
8727 				    (unsigned long long)
8728 				    sas_device->sas_address);
8729 
8730 				if (sas_device->enclosure_handle != 0)
8731 					starget_printk(KERN_INFO, starget,
8732 					 "enclosure logical id(0x%016llx),"
8733 					 " slot(%d)\n",
8734 					 (unsigned long long)
8735 					 sas_device->enclosure_logical_id,
8736 					 sas_device->slot);
8737 			}
8738 			if (le16_to_cpu(sas_device_pg0->Flags) &
8739 			      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8740 				sas_device->enclosure_level =
8741 				   sas_device_pg0->EnclosureLevel;
8742 				memcpy(&sas_device->connector_name[0],
8743 					&sas_device_pg0->ConnectorName[0], 4);
8744 			} else {
8745 				sas_device->enclosure_level = 0;
8746 				sas_device->connector_name[0] = '\0';
8747 			}
8748 
8749 			sas_device->enclosure_handle =
8750 				le16_to_cpu(sas_device_pg0->EnclosureHandle);
8751 			sas_device->is_chassis_slot_valid = 0;
8752 			if (enclosure_dev) {
8753 				sas_device->enclosure_logical_id = le64_to_cpu(
8754 					enclosure_dev->pg0.EnclosureLogicalID);
8755 				if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8756 				    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8757 					sas_device->is_chassis_slot_valid = 1;
8758 					sas_device->chassis_slot =
8759 						enclosure_dev->pg0.ChassisSlot;
8760 				}
8761 			}
8762 
8763 			if (sas_device->handle == le16_to_cpu(
8764 			    sas_device_pg0->DevHandle))
8765 				goto out;
8766 			pr_info("\thandle changed from(0x%04x)!!!\n",
8767 			    sas_device->handle);
8768 			sas_device->handle = le16_to_cpu(
8769 			    sas_device_pg0->DevHandle);
8770 			if (sas_target_priv_data)
8771 				sas_target_priv_data->handle =
8772 				    le16_to_cpu(sas_device_pg0->DevHandle);
8773 			goto out;
8774 		}
8775 	}
8776  out:
8777 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8778 }
8779 
8780 /**
8781  * _scsih_create_enclosure_list_after_reset - Free Existing list,
8782  *	And create enclosure list by scanning all Enclosure Page(0)s
8783  * @ioc: per adapter object
8784  */
8785 static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER * ioc)8786 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8787 {
8788 	struct _enclosure_node *enclosure_dev;
8789 	Mpi2ConfigReply_t mpi_reply;
8790 	u16 enclosure_handle;
8791 	int rc;
8792 
8793 	/* Free existing enclosure list */
8794 	mpt3sas_free_enclosure_list(ioc);
8795 
8796 	/* Re constructing enclosure list after reset*/
8797 	enclosure_handle = 0xFFFF;
8798 	do {
8799 		enclosure_dev =
8800 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8801 		if (!enclosure_dev) {
8802 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8803 				__FILE__, __LINE__, __func__);
8804 			return;
8805 		}
8806 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8807 				&enclosure_dev->pg0,
8808 				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8809 				enclosure_handle);
8810 
8811 		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8812 						MPI2_IOCSTATUS_MASK)) {
8813 			kfree(enclosure_dev);
8814 			return;
8815 		}
8816 		list_add_tail(&enclosure_dev->list,
8817 						&ioc->enclosure_list);
8818 		enclosure_handle =
8819 			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8820 	} while (1);
8821 }
8822 
8823 /**
8824  * _scsih_search_responding_sas_devices -
8825  * @ioc: per adapter object
8826  *
8827  * After host reset, find out whether devices are still responding.
8828  * If not remove.
8829  */
8830 static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER * ioc)8831 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8832 {
8833 	Mpi2SasDevicePage0_t sas_device_pg0;
8834 	Mpi2ConfigReply_t mpi_reply;
8835 	u16 ioc_status;
8836 	u16 handle;
8837 	u32 device_info;
8838 
8839 	ioc_info(ioc, "search for end-devices: start\n");
8840 
8841 	if (list_empty(&ioc->sas_device_list))
8842 		goto out;
8843 
8844 	handle = 0xFFFF;
8845 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8846 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8847 	    handle))) {
8848 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8849 		    MPI2_IOCSTATUS_MASK;
8850 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8851 			break;
8852 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
8853 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8854 		if (!(_scsih_is_end_device(device_info)))
8855 			continue;
8856 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8857 	}
8858 
8859  out:
8860 	ioc_info(ioc, "search for end-devices: complete\n");
8861 }
8862 
8863 /**
8864  * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8865  * @ioc: per adapter object
8866  * @pcie_device_pg0: PCIe Device page 0
8867  *
8868  * After host reset, find out whether devices are still responding.
8869  * Used in _scsih_remove_unresponding_devices.
8870  */
8871 static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER * ioc,Mpi26PCIeDevicePage0_t * pcie_device_pg0)8872 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8873 	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8874 {
8875 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8876 	struct scsi_target *starget;
8877 	struct _pcie_device *pcie_device;
8878 	unsigned long flags;
8879 
8880 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8881 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8882 		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8883 		    && (pcie_device->slot == le16_to_cpu(
8884 		    pcie_device_pg0->Slot))) {
8885 			pcie_device->access_status =
8886 					pcie_device_pg0->AccessStatus;
8887 			pcie_device->responding = 1;
8888 			starget = pcie_device->starget;
8889 			if (starget && starget->hostdata) {
8890 				sas_target_priv_data = starget->hostdata;
8891 				sas_target_priv_data->tm_busy = 0;
8892 				sas_target_priv_data->deleted = 0;
8893 			} else
8894 				sas_target_priv_data = NULL;
8895 			if (starget) {
8896 				starget_printk(KERN_INFO, starget,
8897 				    "handle(0x%04x), wwid(0x%016llx) ",
8898 				    pcie_device->handle,
8899 				    (unsigned long long)pcie_device->wwid);
8900 				if (pcie_device->enclosure_handle != 0)
8901 					starget_printk(KERN_INFO, starget,
8902 					    "enclosure logical id(0x%016llx), "
8903 					    "slot(%d)\n",
8904 					    (unsigned long long)
8905 					    pcie_device->enclosure_logical_id,
8906 					    pcie_device->slot);
8907 			}
8908 
8909 			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8910 			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8911 			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8912 				pcie_device->enclosure_level =
8913 				    pcie_device_pg0->EnclosureLevel;
8914 				memcpy(&pcie_device->connector_name[0],
8915 				    &pcie_device_pg0->ConnectorName[0], 4);
8916 			} else {
8917 				pcie_device->enclosure_level = 0;
8918 				pcie_device->connector_name[0] = '\0';
8919 			}
8920 
8921 			if (pcie_device->handle == le16_to_cpu(
8922 			    pcie_device_pg0->DevHandle))
8923 				goto out;
8924 			pr_info("\thandle changed from(0x%04x)!!!\n",
8925 			    pcie_device->handle);
8926 			pcie_device->handle = le16_to_cpu(
8927 			    pcie_device_pg0->DevHandle);
8928 			if (sas_target_priv_data)
8929 				sas_target_priv_data->handle =
8930 				    le16_to_cpu(pcie_device_pg0->DevHandle);
8931 			goto out;
8932 		}
8933 	}
8934 
8935  out:
8936 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8937 }
8938 
8939 /**
8940  * _scsih_search_responding_pcie_devices -
8941  * @ioc: per adapter object
8942  *
8943  * After host reset, find out whether devices are still responding.
8944  * If not remove.
8945  */
8946 static void
_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER * ioc)8947 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8948 {
8949 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8950 	Mpi2ConfigReply_t mpi_reply;
8951 	u16 ioc_status;
8952 	u16 handle;
8953 	u32 device_info;
8954 
8955 	ioc_info(ioc, "search for end-devices: start\n");
8956 
8957 	if (list_empty(&ioc->pcie_device_list))
8958 		goto out;
8959 
8960 	handle = 0xFFFF;
8961 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8962 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8963 		handle))) {
8964 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8965 		    MPI2_IOCSTATUS_MASK;
8966 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8967 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
8968 				 __func__, ioc_status,
8969 				 le32_to_cpu(mpi_reply.IOCLogInfo));
8970 			break;
8971 		}
8972 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8973 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8974 		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8975 			continue;
8976 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8977 	}
8978 out:
8979 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
8980 }
8981 
8982 /**
8983  * _scsih_mark_responding_raid_device - mark a raid_device as responding
8984  * @ioc: per adapter object
8985  * @wwid: world wide identifier for raid volume
8986  * @handle: device handle
8987  *
8988  * After host reset, find out whether devices are still responding.
8989  * Used in _scsih_remove_unresponsive_raid_devices.
8990  */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		/* Match on WWID; only volumes with an attached starget count */
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				/* un-delete so the target is usable again */
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/* NOTE(review): lock is dropped around the printk and
			 * warpdrive re-init — presumably they may sleep or
			 * issue config requests; confirm before restructuring.
			 */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			/* Re-take the lock before examining/updating handle */
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			/* Firmware assigned a new handle across the reset */
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
9038 
/**
 * _scsih_search_responding_raid_devices - search responding raid volumes
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
9046 static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER * ioc)9047 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
9048 {
9049 	Mpi2RaidVolPage1_t volume_pg1;
9050 	Mpi2RaidVolPage0_t volume_pg0;
9051 	Mpi2RaidPhysDiskPage0_t pd_pg0;
9052 	Mpi2ConfigReply_t mpi_reply;
9053 	u16 ioc_status;
9054 	u16 handle;
9055 	u8 phys_disk_num;
9056 
9057 	if (!ioc->ir_firmware)
9058 		return;
9059 
9060 	ioc_info(ioc, "search for raid volumes: start\n");
9061 
9062 	if (list_empty(&ioc->raid_device_list))
9063 		goto out;
9064 
9065 	handle = 0xFFFF;
9066 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9067 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9068 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9069 		    MPI2_IOCSTATUS_MASK;
9070 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9071 			break;
9072 		handle = le16_to_cpu(volume_pg1.DevHandle);
9073 
9074 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9075 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9076 		     sizeof(Mpi2RaidVolPage0_t)))
9077 			continue;
9078 
9079 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9080 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9081 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
9082 			_scsih_mark_responding_raid_device(ioc,
9083 			    le64_to_cpu(volume_pg1.WWID), handle);
9084 	}
9085 
9086 	/* refresh the pd_handles */
9087 	if (!ioc->is_warpdrive) {
9088 		phys_disk_num = 0xFF;
9089 		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
9090 		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9091 		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9092 		    phys_disk_num))) {
9093 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9094 			    MPI2_IOCSTATUS_MASK;
9095 			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9096 				break;
9097 			phys_disk_num = pd_pg0.PhysDiskNum;
9098 			handle = le16_to_cpu(pd_pg0.DevHandle);
9099 			set_bit(handle, ioc->pd_handles);
9100 		}
9101 	}
9102  out:
9103 	ioc_info(ioc, "search for responding raid volumes: complete\n");
9104 }
9105 
9106 /**
9107  * _scsih_mark_responding_expander - mark a expander as responding
9108  * @ioc: per adapter object
9109  * @expander_pg0:SAS Expander Config Page0
9110  *
9111  * After host reset, find out whether devices are still responding.
9112  * Used in _scsih_remove_unresponsive_expanders.
9113  */
9114 static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER * ioc,Mpi2ExpanderPage0_t * expander_pg0)9115 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
9116 	Mpi2ExpanderPage0_t *expander_pg0)
9117 {
9118 	struct _sas_node *sas_expander = NULL;
9119 	unsigned long flags;
9120 	int i;
9121 	struct _enclosure_node *enclosure_dev = NULL;
9122 	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
9123 	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
9124 	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
9125 
9126 	if (enclosure_handle)
9127 		enclosure_dev =
9128 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
9129 							enclosure_handle);
9130 
9131 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
9132 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
9133 		if (sas_expander->sas_address != sas_address)
9134 			continue;
9135 		sas_expander->responding = 1;
9136 
9137 		if (enclosure_dev) {
9138 			sas_expander->enclosure_logical_id =
9139 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
9140 			sas_expander->enclosure_handle =
9141 			    le16_to_cpu(expander_pg0->EnclosureHandle);
9142 		}
9143 
9144 		if (sas_expander->handle == handle)
9145 			goto out;
9146 		pr_info("\texpander(0x%016llx): handle changed" \
9147 		    " from(0x%04x) to (0x%04x)!!!\n",
9148 		    (unsigned long long)sas_expander->sas_address,
9149 		    sas_expander->handle, handle);
9150 		sas_expander->handle = handle;
9151 		for (i = 0 ; i < sas_expander->num_phys ; i++)
9152 			sas_expander->phy[i].handle = handle;
9153 		goto out;
9154 	}
9155  out:
9156 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9157 }
9158 
/**
 * _scsih_search_responding_expanders - search responding expanders
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
9166 static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER * ioc)9167 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
9168 {
9169 	Mpi2ExpanderPage0_t expander_pg0;
9170 	Mpi2ConfigReply_t mpi_reply;
9171 	u16 ioc_status;
9172 	u64 sas_address;
9173 	u16 handle;
9174 
9175 	ioc_info(ioc, "search for expanders: start\n");
9176 
9177 	if (list_empty(&ioc->sas_expander_list))
9178 		goto out;
9179 
9180 	handle = 0xFFFF;
9181 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
9182 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
9183 
9184 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9185 		    MPI2_IOCSTATUS_MASK;
9186 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9187 			break;
9188 
9189 		handle = le16_to_cpu(expander_pg0.DevHandle);
9190 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
9191 		pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
9192 			handle,
9193 		    (unsigned long long)sas_address);
9194 		_scsih_mark_responding_expander(ioc, &expander_pg0);
9195 	}
9196 
9197  out:
9198 	ioc_info(ioc, "search for expanders: complete\n");
9199 }
9200 
9201 /**
9202  * _scsih_remove_unresponding_devices - removing unresponding devices
9203  * @ioc: per adapter object
9204  */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			/* clear the flag so the next reset re-evaluates it */
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		/* drop the reference we took ownership of above */
		sas_device_put(sas_device);
	}

	/* Same splice-under-lock / remove-outside-lock pattern for PCIe */
	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}
9296 
9297 static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander,u16 handle)9298 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
9299 	struct _sas_node *sas_expander, u16 handle)
9300 {
9301 	Mpi2ExpanderPage1_t expander_pg1;
9302 	Mpi2ConfigReply_t mpi_reply;
9303 	int i;
9304 
9305 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
9306 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
9307 		    &expander_pg1, i, handle))) {
9308 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
9309 				__FILE__, __LINE__, __func__);
9310 			return;
9311 		}
9312 
9313 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
9314 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
9315 		    expander_pg1.NegotiatedLinkRate >> 4);
9316 	}
9317 }
9318 
9319 /**
9320  * _scsih_scan_for_devices_after_reset - scan for devices after host reset
9321  * @ioc: per adapter object
9322  */
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2EventIrConfigElement_t element;
	Mpi2ConfigReply_t mpi_reply;
	u8 phys_disk_num;
	u16 ioc_status;
	u16 handle, parent_handle;
	u64 sas_address;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _sas_node *expander_device;
	/* NOTE(review): `static` looks historical — the pointer is
	 * reassigned before every use below; confirm the intent.
	 */
	static struct _raid_device *raid_device;
	u8 retry_count;
	unsigned long flags;

	ioc_info(ioc, "scan devices: start\n");

	_scsih_sas_host_refresh(ioc);

	ioc_info(ioc, "\tscan devices: expanders start\n");

	/* expanders */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(expander_pg0.DevHandle);
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
		    ioc, le64_to_cpu(expander_pg0.SASAddress));
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* Known expander: just refresh its links; otherwise add it */
		if (expander_device)
			_scsih_refresh_expander_links(ioc, expander_device,
			    handle);
		else {
			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(expander_pg0.SASAddress));
			_scsih_expander_add(ioc, handle);
			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(expander_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: expanders complete\n");

	/* RAID sections only apply when IR firmware is present */
	if (!ioc->ir_firmware)
		goto skip_to_sas;

	ioc_info(ioc, "\tscan devices: phys disk start\n");

	/* phys disk */
	phys_disk_num = 0xFF;
	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
	    phys_disk_num))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		phys_disk_num = pd_pg0.PhysDiskNum;
		handle = le16_to_cpu(pd_pg0.DevHandle);
		/* Already known? drop the lookup reference and move on */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			continue;
		}
		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle) != 0)
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle,
		    &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, sas_device_pg0.PhyNum,
			    MPI2_SAS_NEG_LINK_RATE_1_5);
			set_bit(handle, ioc->pd_handles);
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    1)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: phys disk complete\n");

	ioc_info(ioc, "\tscan devices: volumes start\n");

	/* volumes */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(volume_pg1.DevHandle);
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_wwid(ioc,
		    le64_to_cpu(volume_pg1.WWID));
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		/* Volume already tracked — nothing to add */
		if (raid_device)
			continue;
		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		     sizeof(Mpi2RaidVolPage0_t)))
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		/* Add healthy-enough volumes via a synthesized ADDED event */
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
			element.VolDevHandle = volume_pg1.DevHandle;
			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
				 volume_pg1.DevHandle);
			_scsih_sas_volume_add(ioc, &element);
			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
				 volume_pg1.DevHandle);
		}
	}

	ioc_info(ioc, "\tscan devices: volumes complete\n");

 skip_to_sas:

	ioc_info(ioc, "\tscan devices: end devices start\n");

	/* sas devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		if (!(_scsih_is_end_device(
		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
			continue;
		/* Already known by SAS address? skip */
		sas_device = mpt3sas_get_sdev_by_addr(ioc,
		    le64_to_cpu(sas_device_pg0.SASAddress));
		if (sas_device) {
			sas_device_put(sas_device);
			continue;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    0)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
				 handle,
				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}
	ioc_info(ioc, "\tscan devices: end devices complete\n");
	ioc_info(ioc, "\tscan devices: pcie end devices start\n");

	/* pcie devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
		handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
				& MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
		if (!(_scsih_is_nvme_pciescsi_device(
			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
			continue;
		/* Already known by WWID? skip */
		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
				le64_to_cpu(pcie_device_pg0.WWID));
		if (pcie_device) {
			pcie_device_put(pcie_device);
			continue;
		}
		retry_count = 0;
		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
		_scsih_pcie_add_device(ioc, handle);

		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
	}
	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
	ioc_info(ioc, "scan devices: complete\n");
}
9574 
/**
 * mpt3sas_scsih_pre_reset_handler - pre-reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	/* No pre-reset teardown required; just trace the callback */
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
9585 
9586 /**
9587  * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
9588  *							scsi & tm cmds.
9589  * @ioc: per adapter object
9590  *
9591  * The handler for doing any required cleanup or initialization.
9592  */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
	/* Fail back any in-flight internal scsih command: mark it reset,
	 * release its smid, and wake whoever is waiting on it.
	 */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	/* Same for an outstanding task-management command */
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	/* Clear per-device bookkeeping bitmaps, then drain the firmware
	 * event queue and flush any running SCSI commands.
	 */
	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	       ioc->device_remove_in_progress_sz);
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}
9615 
/**
 * mpt3sas_scsih_reset_done_handler - reset-done callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
	/* Rescan only when the driver has finished loading, and not when
	 * discovery is disabled with no host phys present.
	 */
	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
					   !ioc->sas_hba.num_phys)) {
		_scsih_prep_device_scan(ioc);
		_scsih_create_enclosure_list_after_reset(ioc);
		_scsih_search_responding_sas_devices(ioc);
		_scsih_search_responding_pcie_devices(ioc);
		_scsih_search_responding_raid_devices(ioc);
		_scsih_search_responding_expanders(ioc);
		/* Schedule removal of anything not marked responding above */
		_scsih_error_recovery_delete_devices(ioc);
	}
}
9637 
9638 /**
9639  * _mpt3sas_fw_work - delayed task for processing firmware events
9640  * @ioc: per adapter object
9641  * @fw_event: The fw_event_work object
9642  * Context: user.
9643  */
9644 static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9645 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9646 {
9647 	ioc->current_event = fw_event;
9648 	_scsih_fw_event_del_from_list(ioc, fw_event);
9649 
9650 	/* the queue is being flushed so ignore this event */
9651 	if (ioc->remove_host || ioc->pci_error_recovery) {
9652 		fw_event_work_put(fw_event);
9653 		ioc->current_event = NULL;
9654 		return;
9655 	}
9656 
9657 	switch (fw_event->event) {
9658 	case MPT3SAS_PROCESS_TRIGGER_DIAG:
9659 		mpt3sas_process_trigger_data(ioc,
9660 			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9661 			fw_event->event_data);
9662 		break;
9663 	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9664 		while (scsi_host_in_recovery(ioc->shost) ||
9665 					 ioc->shost_recovery) {
9666 			/*
9667 			 * If we're unloading or cancelling the work, bail.
9668 			 * Otherwise, this can become an infinite loop.
9669 			 */
9670 			if (ioc->remove_host || ioc->fw_events_cleanup)
9671 				goto out;
9672 			ssleep(1);
9673 		}
9674 		_scsih_remove_unresponding_devices(ioc);
9675 		_scsih_scan_for_devices_after_reset(ioc);
9676 		_scsih_set_nvme_max_shutdown_latency(ioc);
9677 		break;
9678 	case MPT3SAS_PORT_ENABLE_COMPLETE:
9679 		ioc->start_scan = 0;
9680 		if (missing_delay[0] != -1 && missing_delay[1] != -1)
9681 			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9682 			    missing_delay[1]);
9683 		dewtprintk(ioc,
9684 			   ioc_info(ioc, "port enable: complete from worker thread\n"));
9685 		break;
9686 	case MPT3SAS_TURN_ON_PFA_LED:
9687 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9688 		break;
9689 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9690 		_scsih_sas_topology_change_event(ioc, fw_event);
9691 		break;
9692 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9693 		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
9694 			_scsih_sas_device_status_change_event_debug(ioc,
9695 			    (Mpi2EventDataSasDeviceStatusChange_t *)
9696 			    fw_event->event_data);
9697 		break;
9698 	case MPI2_EVENT_SAS_DISCOVERY:
9699 		_scsih_sas_discovery_event(ioc, fw_event);
9700 		break;
9701 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9702 		_scsih_sas_device_discovery_error_event(ioc, fw_event);
9703 		break;
9704 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9705 		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
9706 		break;
9707 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9708 		_scsih_sas_enclosure_dev_status_change_event(ioc,
9709 		    fw_event);
9710 		break;
9711 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9712 		_scsih_sas_ir_config_change_event(ioc, fw_event);
9713 		break;
9714 	case MPI2_EVENT_IR_VOLUME:
9715 		_scsih_sas_ir_volume_event(ioc, fw_event);
9716 		break;
9717 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9718 		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
9719 		break;
9720 	case MPI2_EVENT_IR_OPERATION_STATUS:
9721 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
9722 		break;
9723 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9724 		_scsih_pcie_device_status_change_event(ioc, fw_event);
9725 		break;
9726 	case MPI2_EVENT_PCIE_ENUMERATION:
9727 		_scsih_pcie_enumeration_event(ioc, fw_event);
9728 		break;
9729 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9730 		_scsih_pcie_topology_change_event(ioc, fw_event);
9731 		ioc->current_event = NULL;
9732 			return;
9733 	break;
9734 	}
9735 out:
9736 	fw_event_work_put(fw_event);
9737 	ioc->current_event = NULL;
9738 }
9739 
9740 /**
9741  * _firmware_event_work
9742  * @work: The fw_event_work object
9743  * Context: user.
9744  *
9745  * wrappers for the work thread handling firmware events
9746  */
9747 
static void
_firmware_event_work(struct work_struct *work)
{
	/* Recover the enclosing fw_event_work from the embedded
	 * work_struct and dispatch it in process context.
	 */
	struct fw_event_work *fw_event = container_of(work,
	    struct fw_event_work, work);

	_mpt3sas_fw_work(fw_event->ioc, fw_event);
}
9756 
9757 /**
9758  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9759  * @ioc: per adapter object
9760  * @msix_index: MSIX table index supplied by the OS
9761  * @reply: reply message frame(lower 32bit addr)
9762  * Context: interrupt.
9763  *
9764  * This function merely adds a new work task into ioc->firmware_event_thread.
9765  * The tasks are worked from _firmware_event_work in user context.
9766  *
9767  * Return: 1 meaning mf should be freed from _base_interrupt
9768  *         0 means the mf is freed from this function.
9769  */
9770 u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER * ioc,u8 msix_index,u32 reply)9771 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9772 	u32 reply)
9773 {
9774 	struct fw_event_work *fw_event;
9775 	Mpi2EventNotificationReply_t *mpi_reply;
9776 	u16 event;
9777 	u16 sz;
9778 	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9779 
9780 	/* events turned off due to host reset */
9781 	if (ioc->pci_error_recovery)
9782 		return 1;
9783 
9784 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9785 
9786 	if (unlikely(!mpi_reply)) {
9787 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9788 			__FILE__, __LINE__, __func__);
9789 		return 1;
9790 	}
9791 
9792 	event = le16_to_cpu(mpi_reply->Event);
9793 
9794 	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9795 		mpt3sas_trigger_event(ioc, event, 0);
9796 
9797 	switch (event) {
9798 	/* handle these */
9799 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9800 	{
9801 		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9802 		    (Mpi2EventDataSasBroadcastPrimitive_t *)
9803 		    mpi_reply->EventData;
9804 
9805 		if (baen_data->Primitive !=
9806 		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9807 			return 1;
9808 
9809 		if (ioc->broadcast_aen_busy) {
9810 			ioc->broadcast_aen_pending++;
9811 			return 1;
9812 		} else
9813 			ioc->broadcast_aen_busy = 1;
9814 		break;
9815 	}
9816 
9817 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9818 		_scsih_check_topo_delete_events(ioc,
9819 		    (Mpi2EventDataSasTopologyChangeList_t *)
9820 		    mpi_reply->EventData);
9821 		break;
9822 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9823 	_scsih_check_pcie_topo_remove_events(ioc,
9824 		    (Mpi26EventDataPCIeTopologyChangeList_t *)
9825 		    mpi_reply->EventData);
9826 		break;
9827 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9828 		_scsih_check_ir_config_unhide_events(ioc,
9829 		    (Mpi2EventDataIrConfigChangeList_t *)
9830 		    mpi_reply->EventData);
9831 		break;
9832 	case MPI2_EVENT_IR_VOLUME:
9833 		_scsih_check_volume_delete_events(ioc,
9834 		    (Mpi2EventDataIrVolume_t *)
9835 		    mpi_reply->EventData);
9836 		break;
9837 	case MPI2_EVENT_LOG_ENTRY_ADDED:
9838 	{
9839 		Mpi2EventDataLogEntryAdded_t *log_entry;
9840 		u32 *log_code;
9841 
9842 		if (!ioc->is_warpdrive)
9843 			break;
9844 
9845 		log_entry = (Mpi2EventDataLogEntryAdded_t *)
9846 		    mpi_reply->EventData;
9847 		log_code = (u32 *)log_entry->LogData;
9848 
9849 		if (le16_to_cpu(log_entry->LogEntryQualifier)
9850 		    != MPT2_WARPDRIVE_LOGENTRY)
9851 			break;
9852 
9853 		switch (le32_to_cpu(*log_code)) {
9854 		case MPT2_WARPDRIVE_LC_SSDT:
9855 			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9856 			break;
9857 		case MPT2_WARPDRIVE_LC_SSDLW:
9858 			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9859 			break;
9860 		case MPT2_WARPDRIVE_LC_SSDLF:
9861 			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9862 			break;
9863 		case MPT2_WARPDRIVE_LC_BRMF:
9864 			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9865 			break;
9866 		}
9867 
9868 		break;
9869 	}
9870 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9871 		_scsih_sas_device_status_change_event(ioc,
9872 		    (Mpi2EventDataSasDeviceStatusChange_t *)
9873 		    mpi_reply->EventData);
9874 		break;
9875 	case MPI2_EVENT_IR_OPERATION_STATUS:
9876 	case MPI2_EVENT_SAS_DISCOVERY:
9877 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9878 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9879 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9880 	case MPI2_EVENT_PCIE_ENUMERATION:
9881 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9882 		break;
9883 
9884 	case MPI2_EVENT_TEMP_THRESHOLD:
9885 		_scsih_temp_threshold_events(ioc,
9886 			(Mpi2EventDataTemperature_t *)
9887 			mpi_reply->EventData);
9888 		break;
9889 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9890 		ActiveCableEventData =
9891 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9892 		switch (ActiveCableEventData->ReasonCode) {
9893 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9894 			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
9895 				   ActiveCableEventData->ReceptacleID);
9896 			pr_notice("cannot be powered and devices connected\n");
9897 			pr_notice("to this active cable will not be seen\n");
9898 			pr_notice("This active cable requires %d mW of power\n",
9899 			     ActiveCableEventData->ActiveCablePowerRequirement);
9900 			break;
9901 
9902 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9903 			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
9904 				   ActiveCableEventData->ReceptacleID);
9905 			pr_notice(
9906 			    "is not running at optimal speed(12 Gb/s rate)\n");
9907 			break;
9908 		}
9909 
9910 		break;
9911 
9912 	default: /* ignore the rest */
9913 		return 1;
9914 	}
9915 
9916 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9917 	fw_event = alloc_fw_event_work(sz);
9918 	if (!fw_event) {
9919 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9920 			__FILE__, __LINE__, __func__);
9921 		return 1;
9922 	}
9923 
9924 	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9925 	fw_event->ioc = ioc;
9926 	fw_event->VF_ID = mpi_reply->VF_ID;
9927 	fw_event->VP_ID = mpi_reply->VP_ID;
9928 	fw_event->event = event;
9929 	_scsih_fw_event_add(ioc, fw_event);
9930 	fw_event_work_put(fw_event);
9931 	return 1;
9932 }
9933 
9934 /**
9935  * _scsih_expander_node_remove - removing expander device from list.
9936  * @ioc: per adapter object
 * @sas_expander: the sas_node object for the expander being removed
9938  *
9939  * Removing object and freeing associated memory from the
9940  * ioc->sas_expander_list.
9941  */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	   &sas_expander->sas_port_list, port_list) {
		/*
		 * Host reset in progress: abort the teardown.  NOTE(review):
		 * on this path the expander object stays on
		 * ioc->sas_expander_list and is not freed here - presumably
		 * reaped elsewhere after recovery; confirm.
		 */
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			/* child expander attached to this one */
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address);
	}

	/* detach this expander's own port from the sas transport layer */
	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent);

	ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_expander->handle, (unsigned long long)
		 sas_expander->sas_address);

	/* unlink from ioc->sas_expander_list under the node lock */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}
9980 
9981 /**
9982  * _scsih_nvme_shutdown - NVMe shutdown notification
9983  * @ioc: per adapter object
9984  *
9985  * Sending IoUnitControl request with shutdown operation code to alert IOC that
9986  * the host system is shutting down so that IOC can issue NVMe shutdown to
9987  * NVMe drives attached to it.
9988  */
static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;

	/* are there any NVMe devices ? */
	if (list_empty(&ioc->pcie_device_list))
		return;

	/* scsih_cmds is a single shared command slot; serialize its users */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	/* obtain a free message frame index (smid) for the internal command */
	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc,
		    "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* build the IO_UNIT_CONTROL request with the SHUTDOWN operation */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for max_shutdown_latency seconds */
	ioc_info(ioc,
		"Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
		ioc->max_shutdown_latency);
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
			ioc->max_shutdown_latency*HZ);

	/* on timeout just log it; system shutdown proceeds regardless */
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_info(ioc, "Io Unit Control shutdown (complete):"
			"ioc_status(0x%04x), loginfo(0x%08x)\n",
			le16_to_cpu(mpi_reply->IOCStatus),
			le32_to_cpu(mpi_reply->IOCLogInfo));
	}
 out:
	/* always release the command slot and the mutex */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
10048 
10049 
10050 /**
10051  * _scsih_ir_shutdown - IR shutdown notification
10052  * @ioc: per adapter object
10053  *
10054  * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
10055  * the host system is shutting down.
10056  */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	/* scsih_cmds is a single shared command slot; serialize its users */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	/* obtain a free message frame index (smid) for the internal command */
	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* build the RAID_ACTION request announcing system shutdown */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/* hide_ir_msg: quiet mode - skip informational IR messages */
	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	/* on timeout just log it; system shutdown proceeds regardless */
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				 le16_to_cpu(mpi_reply->IOCStatus),
				 le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	/* always release the command slot and the mutex */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
10117 
10118 /**
10119  * _scsih_get_shost_and_ioc - get shost and ioc
10120  *			and verify whether they are NULL or not
10121  * @pdev: PCI device struct
10122  * @shost: address of scsi host pointer
10123  * @ioc: address of HBA adapter pointer
10124  *
10125  * Return zero if *shost and *ioc are not NULL otherwise return error number.
10126  */
10127 static int
_scsih_get_shost_and_ioc(struct pci_dev * pdev,struct Scsi_Host ** shost,struct MPT3SAS_ADAPTER ** ioc)10128 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
10129 	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
10130 {
10131 	*shost = pci_get_drvdata(pdev);
10132 	if (*shost == NULL) {
10133 		dev_err(&pdev->dev, "pdev's driver data is null\n");
10134 		return -ENXIO;
10135 	}
10136 
10137 	*ioc = shost_priv(*shost);
10138 	if (*ioc == NULL) {
10139 		dev_err(&pdev->dev, "shost's private data is null\n");
10140 		return -ENXIO;
10141 	}
10142 
10143 	return 0;
10144 }
10145 
10146 /**
 * scsih_remove - detach and remove the scsi host
10148  * @pdev: PCI device struct
10149  *
10150  * Routine called when unloading the driver.
10151  */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct	*wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* surprise removal: the hardware is gone, fail outstanding I/O */
	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the firmware-event workqueue under the lock, destroy after */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1. so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
				&ioc->ioc_pg1_copy);
	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	mpt3sas_destroy_debugfs(ioc);
	sas_remove_host(shost);
	/* tear down RAID volumes exposed to scsi-ml */
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			/* flag deleted so no new I/O is queued to it */
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	/* tear down NVMe (PCIe-attached) devices */
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
		list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	   &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	/* quiesce the IOC, unlink from the global adapter list, drop host */
	mpt3sas_base_detach(ioc);
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}
10238 
10239 /**
10240  * scsih_shutdown - routine call during system shutdown
10241  * @pdev: PCI device struct
10242  */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct	*wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* surprise removal: the hardware is gone, fail outstanding I/O */
	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the firmware-event workqueue under the lock, destroy after */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
				&ioc->ioc_pg1_copy);

	/* alert IR and NVMe devices that the system is going down */
	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_detach(ioc);
}
10280 
10281 
10282 /**
10283  * _scsih_probe_boot_devices - reports 1st device
10284  * @ioc: per adapter object
10285  *
10286  * If specified in bios page 2, this routine reports the 1st
10287  * device scsi-ml or sas transport for persistent boot device
10288  * purposes.  Please refer to function _scsih_determine_boot_device()
10289  */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;

	 /* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/*
	 * Pick the boot device in priority order: requested boot device,
	 * then requested alternate, then current boot device.
	 */
	device = NULL;
	if (ioc->req_boot_device.device) {
		device =  ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device =  ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device =  ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	if (channel == RAID_CHANNEL) {
		raid_device = device;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		/* move from the init list to the active list under the lock */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = device;
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		/*
		 * SAS device: snapshot the fields needed after the lock is
		 * dropped, then move it to the active list.
		 */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = device;
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		/* hidden drives are not exposed through the transport */
		if (ioc->hide_drives)
			return;
		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			/*
			 * Removal during async scanning would oops (see the
			 * comment in _scsih_probe_sas); only clean up once
			 * the driver is fully loaded.
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}
10363 
10364 /**
10365  * _scsih_probe_raid - reporting raid volumes to scsi-ml
10366  * @ioc: per adapter object
10367  *
10368  * Called during initial loading of the driver.
10369  */
10370 static void
_scsih_probe_raid(struct MPT3SAS_ADAPTER * ioc)10371 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
10372 {
10373 	struct _raid_device *raid_device, *raid_next;
10374 	int rc;
10375 
10376 	list_for_each_entry_safe(raid_device, raid_next,
10377 	    &ioc->raid_device_list, list) {
10378 		if (raid_device->starget)
10379 			continue;
10380 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
10381 		    raid_device->id, 0);
10382 		if (rc)
10383 			_scsih_raid_device_remove(ioc, raid_device);
10384 	}
10385 }
10386 
get_next_sas_device(struct MPT3SAS_ADAPTER * ioc)10387 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
10388 {
10389 	struct _sas_device *sas_device = NULL;
10390 	unsigned long flags;
10391 
10392 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
10393 	if (!list_empty(&ioc->sas_device_init_list)) {
10394 		sas_device = list_first_entry(&ioc->sas_device_init_list,
10395 				struct _sas_device, list);
10396 		sas_device_get(sas_device);
10397 	}
10398 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10399 
10400 	return sas_device;
10401 }
10402 
/*
 * sas_device_make_active - move @sas_device onto ioc->sas_device_list.
 *
 * The transient put-before-get below is safe because the caller holds
 * its own reference (taken in get_next_sas_device()).
 */
static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
		struct _sas_device *sas_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Since we dropped the lock during the call to port_add(), we need to
	 * be careful here that somebody else didn't move or delete this item
	 * while we were busy with other things.
	 *
	 * If it was on the list, we need a put() for the reference the list
	 * had. Either way, we need a get() for the destination list.
	 */
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
10428 
10429 /**
10430  * _scsih_probe_sas - reporting sas devices to sas transport
10431  * @ioc: per adapter object
10432  *
10433  * Called during initial loading of the driver.
10434  */
static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device;

	/* hidden drives are not exposed through the transport */
	if (ioc->hide_drives)
		return;

	/* drain sas_device_init_list; each iteration holds its own ref */
	while ((sas_device = get_next_sas_device(ioc))) {
		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
		    sas_device->sas_address_parent)) {
			_scsih_sas_device_remove(ioc, sas_device);
			sas_device_put(sas_device);
			continue;
		} else if (!sas_device->starget) {
			/*
			 * When asyn scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_device->sas_address,
				    sas_device->sas_address_parent);
				_scsih_sas_device_remove(ioc, sas_device);
				sas_device_put(sas_device);
				continue;
			}
		}
		/* success: move to sas_device_list, then drop the loop's ref */
		sas_device_make_active(ioc, sas_device);
		sas_device_put(sas_device);
	}
}
10469 
10470 /**
10471  * get_next_pcie_device - Get the next pcie device
10472  * @ioc: per adapter object
10473  *
10474  * Get the next pcie device from pcie_device_init_list list.
10475  *
10476  * Return: pcie device structure if pcie_device_init_list list is not empty
10477  * otherwise returns NULL
10478  */
get_next_pcie_device(struct MPT3SAS_ADAPTER * ioc)10479 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
10480 {
10481 	struct _pcie_device *pcie_device = NULL;
10482 	unsigned long flags;
10483 
10484 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10485 	if (!list_empty(&ioc->pcie_device_init_list)) {
10486 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
10487 				struct _pcie_device, list);
10488 		pcie_device_get(pcie_device);
10489 	}
10490 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10491 
10492 	return pcie_device;
10493 }
10494 
10495 /**
10496  * pcie_device_make_active - Add pcie device to pcie_device_list list
10497  * @ioc: per adapter object
10498  * @pcie_device: pcie device object
10499  *
10500  * Add the pcie device which has registered with SCSI Transport Later to
10501  * pcie_device_list list
10502  */
static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
		struct _pcie_device *pcie_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);

	/*
	 * If still on a list (e.g. pcie_device_init_list), drop that list's
	 * reference; then take a reference for the destination list.  Safe
	 * because the caller holds its own reference.
	 */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
10519 
10520 /**
10521  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10522  * @ioc: per adapter object
10523  *
10524  * Called during initial loading of the driver.
10525  */
static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	int rc;

	/* PCIe Device List: drain the init list; each iteration holds a ref */
	while ((pcie_device = get_next_pcie_device(ioc))) {
		/* already registered with scsi-ml */
		if (pcie_device->starget) {
			pcie_device_put(pcie_device);
			continue;
		}
		/* blocked device: track it but do not expose it to scsi-ml */
		if (pcie_device->access_status ==
		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
			pcie_device_make_active(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		}
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
			pcie_device->id, 0);
		if (rc) {
			_scsih_pcie_device_remove(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		} else if (!pcie_device->starget) {
			/*
			 * When async scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
			/* TODO-- Need to find out whether this condition will
			 * occur or not
			 */
				_scsih_pcie_device_remove(ioc, pcie_device);
				pcie_device_put(pcie_device);
				continue;
			}
		}
		/* success: move to pcie_device_list, then drop the loop's ref */
		pcie_device_make_active(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}
}
10570 
10571 /**
10572  * _scsih_probe_devices - probing for devices
10573  * @ioc: per adapter object
10574  *
10575  * Called during initial loading of the driver.
10576  */
10577 static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER * ioc)10578 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10579 {
10580 	u16 volume_mapping_flags;
10581 
10582 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10583 		return;  /* return when IOC doesn't support initiator mode */
10584 
10585 	_scsih_probe_boot_devices(ioc);
10586 
10587 	if (ioc->ir_firmware) {
10588 		volume_mapping_flags =
10589 		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10590 		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
10591 		if (volume_mapping_flags ==
10592 		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10593 			_scsih_probe_raid(ioc);
10594 			_scsih_probe_sas(ioc);
10595 		} else {
10596 			_scsih_probe_sas(ioc);
10597 			_scsih_probe_raid(ioc);
10598 		}
10599 	} else {
10600 		_scsih_probe_sas(ioc);
10601 		_scsih_probe_pcie(ioc);
10602 	}
10603 }
10604 
10605 /**
10606  * scsih_scan_start - scsi lld callback for .scan_start
10607  * @shost: SCSI host pointer
10608  *
10609  * The shost has the ability to discover targets on its own instead
 * of scanning the entire bus.  In our implementation, we will kick off
10611  * firmware discovery.
10612  */
10613 static void
scsih_scan_start(struct Scsi_Host * shost)10614 scsih_scan_start(struct Scsi_Host *shost)
10615 {
10616 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10617 	int rc;
10618 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10619 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10620 	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
10621 		mpt3sas_enable_diag_buffer(ioc, 1);
10622 
10623 	if (disable_discovery > 0)
10624 		return;
10625 
10626 	ioc->start_scan = 1;
10627 	rc = mpt3sas_port_enable(ioc);
10628 
10629 	if (rc != 0)
10630 		ioc_info(ioc, "port enable: FAILED\n");
10631 }
10632 
10633 /**
10634  * scsih_scan_finished - scsi lld callback for .scan_finished
10635  * @shost: SCSI host pointer
10636  * @time: elapsed time of the scan in jiffies
10637  *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
10640  * we wait for firmware discovery to complete, then return 1.
10641  */
10642 static int
scsih_scan_finished(struct Scsi_Host * shost,unsigned long time)10643 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10644 {
10645 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10646 
10647 	if (disable_discovery > 0) {
10648 		ioc->is_driver_loading = 0;
10649 		ioc->wait_for_discovery_to_complete = 0;
10650 		return 1;
10651 	}
10652 
10653 	if (time >= (300 * HZ)) {
10654 		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10655 		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10656 		ioc->is_driver_loading = 0;
10657 		return 1;
10658 	}
10659 
10660 	if (ioc->start_scan)
10661 		return 0;
10662 
10663 	if (ioc->start_scan_failed) {
10664 		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10665 			 ioc->start_scan_failed);
10666 		ioc->is_driver_loading = 0;
10667 		ioc->wait_for_discovery_to_complete = 0;
10668 		ioc->remove_host = 1;
10669 		return 1;
10670 	}
10671 
10672 	ioc_info(ioc, "port enable: SUCCESS\n");
10673 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10674 
10675 	if (ioc->wait_for_discovery_to_complete) {
10676 		ioc->wait_for_discovery_to_complete = 0;
10677 		_scsih_probe_devices(ioc);
10678 	}
10679 	mpt3sas_base_start_watchdog(ioc);
10680 	ioc->is_driver_loading = 0;
10681 	return 1;
10682 }
10683 
10684 /* shost template for SAS 2.0 HBA devices */
static struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* NOTE(review): can_queue of 1 looks like a placeholder,
	 * presumably raised once IOC facts are known - confirm */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.shost_attrs			= mpt3sas_host_attrs,
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	/* per-command driver data carved out of each scsi_cmnd */
	.cmd_size			= sizeof(struct scsiio_tracker),
};
10713 
10714 /* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	.cookie		= &mpt2sas_driver_template,	/* ties these hooks to SAS 2.0 hosts */
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
10721 
10722 /* shost template for SAS 3.0 HBA devices */
static struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* NOTE(review): can_queue of 1 looks like a placeholder,
	 * presumably raised once IOC facts are known - confirm */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	/* no segment-size cap (unlike the SAS 2.0 template) */
	.max_segment_size		= 0xffffffff,
	.cmd_per_lun			= 7,
	.shost_attrs			= mpt3sas_host_attrs,
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	/* per-command driver data carved out of each scsi_cmnd */
	.cmd_size			= sizeof(struct scsiio_tracker),
};
10752 
/* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	.cookie		= &mpt3sas_driver_template,	/* binds this raid class to the SAS 3.0 shost template */
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
10760 
/**
 * _scsih_determine_hba_mpi_version - determine in which MPI version class
 *					this device belongs to.
 * @pdev: PCI device struct
 *
 * return MPI2_VERSION for SAS 2.0 HBA devices,
 *	MPI25_VERSION for SAS 3.0 HBA devices,
 *	MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices,
 *	or 0 when the PCI device id is not handled by this driver.
 */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{

	switch (pdev->device) {
	/* SAS 2.0 generation: SSS6200, 2004/2008/2108/2116/2208/2308
	 * families and the switch MPI endpoints.
	 */
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	/* SAS 3.0 generation: 3004/3008 (Fury) and 3108 (Invader) */
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	/* MPI 2.6 generation: 32xx/33xx/34xx/35xx/36xx, Atlas switch,
	 * and the Aero/Sea 3916/3816 parts (including their invalid and
	 * tampered device ids, which _scsih_probe() rejects explicitly).
	 */
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	return 0;
}
10832 
10833 /**
10834  * _scsih_probe - attach and add scsi host
10835  * @pdev: PCI device struct
10836  * @id: pci device id
10837  *
10838  * Return: 0 success, anything else error.
10839  */
10840 static int
_scsih_probe(struct pci_dev * pdev,const struct pci_device_id * id)10841 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10842 {
10843 	struct MPT3SAS_ADAPTER *ioc;
10844 	struct Scsi_Host *shost = NULL;
10845 	int rv;
10846 	u16 hba_mpi_version;
10847 
10848 	/* Determine in which MPI version class this pci device belongs */
10849 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10850 	if (hba_mpi_version == 0)
10851 		return -ENODEV;
10852 
10853 	/* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
10854 	 * for other generation HBA's return with -ENODEV
10855 	 */
10856 	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
10857 		return -ENODEV;
10858 
10859 	/* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
10860 	 * for other generation HBA's return with -ENODEV
10861 	 */
10862 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
10863 		|| hba_mpi_version ==  MPI26_VERSION)))
10864 		return -ENODEV;
10865 
10866 	switch (hba_mpi_version) {
10867 	case MPI2_VERSION:
10868 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10869 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
10870 		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
10871 		shost = scsi_host_alloc(&mpt2sas_driver_template,
10872 		  sizeof(struct MPT3SAS_ADAPTER));
10873 		if (!shost)
10874 			return -ENODEV;
10875 		ioc = shost_priv(shost);
10876 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10877 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10878 		ioc->id = mpt2_ids++;
10879 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10880 		switch (pdev->device) {
10881 		case MPI2_MFGPAGE_DEVID_SSS6200:
10882 			ioc->is_warpdrive = 1;
10883 			ioc->hide_ir_msg = 1;
10884 			break;
10885 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10886 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10887 			ioc->is_mcpu_endpoint = 1;
10888 			break;
10889 		default:
10890 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10891 			break;
10892 		}
10893 		break;
10894 	case MPI25_VERSION:
10895 	case MPI26_VERSION:
10896 		/* Use mpt3sas driver host template for SAS 3.0 HBA's */
10897 		shost = scsi_host_alloc(&mpt3sas_driver_template,
10898 		  sizeof(struct MPT3SAS_ADAPTER));
10899 		if (!shost)
10900 			return -ENODEV;
10901 		ioc = shost_priv(shost);
10902 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10903 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10904 		ioc->id = mpt3_ids++;
10905 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10906 		switch (pdev->device) {
10907 		case MPI26_MFGPAGE_DEVID_SAS3508:
10908 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
10909 		case MPI26_MFGPAGE_DEVID_SAS3408:
10910 		case MPI26_MFGPAGE_DEVID_SAS3516:
10911 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
10912 		case MPI26_MFGPAGE_DEVID_SAS3416:
10913 		case MPI26_MFGPAGE_DEVID_SAS3616:
10914 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10915 			ioc->is_gen35_ioc = 1;
10916 			break;
10917 		case MPI26_MFGPAGE_DEVID_INVALID0_3816:
10918 		case MPI26_MFGPAGE_DEVID_INVALID0_3916:
10919 			dev_err(&pdev->dev,
10920 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
10921 			    pdev->device, pdev->subsystem_vendor,
10922 			    pdev->subsystem_device);
10923 			return 1;
10924 		case MPI26_MFGPAGE_DEVID_INVALID1_3816:
10925 		case MPI26_MFGPAGE_DEVID_INVALID1_3916:
10926 			dev_err(&pdev->dev,
10927 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
10928 			    pdev->device, pdev->subsystem_vendor,
10929 			    pdev->subsystem_device);
10930 			return 1;
10931 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10932 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10933 			dev_info(&pdev->dev,
10934 			    "HBA is in Configurable Secure mode\n");
10935 			fallthrough;
10936 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10937 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10938 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10939 			break;
10940 		default:
10941 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10942 		}
10943 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10944 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10945 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10946 			ioc->combined_reply_queue = 1;
10947 			if (ioc->is_gen35_ioc)
10948 				ioc->combined_reply_index_count =
10949 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10950 			else
10951 				ioc->combined_reply_index_count =
10952 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10953 		}
10954 		break;
10955 	default:
10956 		return -ENODEV;
10957 	}
10958 
10959 	INIT_LIST_HEAD(&ioc->list);
10960 	spin_lock(&gioc_lock);
10961 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10962 	spin_unlock(&gioc_lock);
10963 	ioc->shost = shost;
10964 	ioc->pdev = pdev;
10965 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10966 	ioc->tm_cb_idx = tm_cb_idx;
10967 	ioc->ctl_cb_idx = ctl_cb_idx;
10968 	ioc->base_cb_idx = base_cb_idx;
10969 	ioc->port_enable_cb_idx = port_enable_cb_idx;
10970 	ioc->transport_cb_idx = transport_cb_idx;
10971 	ioc->scsih_cb_idx = scsih_cb_idx;
10972 	ioc->config_cb_idx = config_cb_idx;
10973 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10974 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10975 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10976 	ioc->logging_level = logging_level;
10977 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10978 	/* Host waits for minimum of six seconds */
10979 	ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
10980 	/*
10981 	 * Enable MEMORY MOVE support flag.
10982 	 */
10983 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
10984 
10985 	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
10986 
10987 	/* misc semaphores and spin locks */
10988 	mutex_init(&ioc->reset_in_progress_mutex);
10989 	/* initializing pci_access_mutex lock */
10990 	mutex_init(&ioc->pci_access_mutex);
10991 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10992 	spin_lock_init(&ioc->scsi_lookup_lock);
10993 	spin_lock_init(&ioc->sas_device_lock);
10994 	spin_lock_init(&ioc->sas_node_lock);
10995 	spin_lock_init(&ioc->fw_event_lock);
10996 	spin_lock_init(&ioc->raid_device_lock);
10997 	spin_lock_init(&ioc->pcie_device_lock);
10998 	spin_lock_init(&ioc->diag_trigger_lock);
10999 
11000 	INIT_LIST_HEAD(&ioc->sas_device_list);
11001 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
11002 	INIT_LIST_HEAD(&ioc->sas_expander_list);
11003 	INIT_LIST_HEAD(&ioc->enclosure_list);
11004 	INIT_LIST_HEAD(&ioc->pcie_device_list);
11005 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
11006 	INIT_LIST_HEAD(&ioc->fw_event_list);
11007 	INIT_LIST_HEAD(&ioc->raid_device_list);
11008 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
11009 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
11010 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
11011 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
11012 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
11013 	INIT_LIST_HEAD(&ioc->reply_queue_list);
11014 
11015 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
11016 
11017 	/* init shost parameters */
11018 	shost->max_cmd_len = 32;
11019 	shost->max_lun = max_lun;
11020 	shost->transportt = mpt3sas_transport_template;
11021 	shost->unique_id = ioc->id;
11022 
11023 	if (ioc->is_mcpu_endpoint) {
11024 		/* mCPU MPI support 64K max IO */
11025 		shost->max_sectors = 128;
11026 		ioc_info(ioc, "The max_sectors value is set to %d\n",
11027 			 shost->max_sectors);
11028 	} else {
11029 		if (max_sectors != 0xFFFF) {
11030 			if (max_sectors < 64) {
11031 				shost->max_sectors = 64;
11032 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
11033 					 max_sectors);
11034 			} else if (max_sectors > 32767) {
11035 				shost->max_sectors = 32767;
11036 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
11037 					 max_sectors);
11038 			} else {
11039 				shost->max_sectors = max_sectors & 0xFFFE;
11040 				ioc_info(ioc, "The max_sectors value is set to %d\n",
11041 					 shost->max_sectors);
11042 			}
11043 		}
11044 	}
11045 	/* register EEDP capabilities with SCSI layer */
11046 	if (prot_mask >= 0)
11047 		scsi_host_set_prot(shost, (prot_mask & 0x07));
11048 	else
11049 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
11050 				   | SHOST_DIF_TYPE2_PROTECTION
11051 				   | SHOST_DIF_TYPE3_PROTECTION);
11052 
11053 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
11054 
11055 	/* event thread */
11056 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
11057 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
11058 	ioc->firmware_event_thread = alloc_ordered_workqueue(
11059 	    ioc->firmware_event_name, 0);
11060 	if (!ioc->firmware_event_thread) {
11061 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
11062 			__FILE__, __LINE__, __func__);
11063 		rv = -ENODEV;
11064 		goto out_thread_fail;
11065 	}
11066 
11067 	ioc->is_driver_loading = 1;
11068 	if ((mpt3sas_base_attach(ioc))) {
11069 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
11070 			__FILE__, __LINE__, __func__);
11071 		rv = -ENODEV;
11072 		goto out_attach_fail;
11073 	}
11074 
11075 	if (ioc->is_warpdrive) {
11076 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
11077 			ioc->hide_drives = 0;
11078 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
11079 			ioc->hide_drives = 1;
11080 		else {
11081 			if (mpt3sas_get_num_volumes(ioc))
11082 				ioc->hide_drives = 1;
11083 			else
11084 				ioc->hide_drives = 0;
11085 		}
11086 	} else
11087 		ioc->hide_drives = 0;
11088 
11089 	rv = scsi_add_host(shost, &pdev->dev);
11090 	if (rv) {
11091 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
11092 			__FILE__, __LINE__, __func__);
11093 		goto out_add_shost_fail;
11094 	}
11095 
11096 	scsi_scan_host(shost);
11097 	mpt3sas_setup_debugfs(ioc);
11098 	return 0;
11099 out_add_shost_fail:
11100 	mpt3sas_base_detach(ioc);
11101  out_attach_fail:
11102 	destroy_workqueue(ioc->firmware_event_thread);
11103  out_thread_fail:
11104 	spin_lock(&gioc_lock);
11105 	list_del(&ioc->list);
11106 	spin_unlock(&gioc_lock);
11107 	scsi_host_put(shost);
11108 	return rv;
11109 }
11110 
#ifdef CONFIG_PM
/**
 * scsih_suspend - power management suspend main entry point
 * @pdev: PCI device struct
 * @state: PM state change to (usually PCI_D3)
 *
 * Stops the watchdog, quiesces SCSI traffic, shuts down NVMe drives,
 * frees controller resources and puts the device into the chosen
 * low-power state.
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	pci_power_t device_state;
	int rc;

	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (rc)
		return rc;

	/* stop background monitoring before tearing resources down */
	mpt3sas_base_stop_watchdog(ioc);
	/* NOTE(review): flush_scheduled_work() is discouraged upstream;
	 * a driver-local workqueue flush may be preferable -- confirm.
	 */
	flush_scheduled_work();
	scsi_block_requests(shost);
	_scsih_nvme_shutdown(ioc);
	device_state = pci_choose_state(pdev, state);
	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
		 pdev, pci_name(pdev), device_state);

	pci_save_state(pdev);
	mpt3sas_base_free_resources(ioc);
	pci_set_power_state(pdev, device_state);
	return 0;
}

/**
 * scsih_resume - power management resume main entry point
 * @pdev: PCI device struct
 *
 * Restores PCI state, remaps resources, issues a reset to bring the
 * IOC back to operational state, then unblocks SCSI traffic and
 * restarts the watchdog.
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (r)
		return r;

	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
		 pdev, pci_name(pdev), device_state);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	ioc->pdev = pdev;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		return r;
	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
	scsi_unblock_requests(shost);
	mpt3sas_base_start_watchdog(ioc);
	return 0;
}
#endif /* CONFIG_PM */
11180 
11181 /**
11182  * scsih_pci_error_detected - Called when a PCI error is detected.
11183  * @pdev: PCI device struct
11184  * @state: PCI channel state
11185  *
11186  * Description: Called when a PCI error is detected.
11187  *
11188  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
11189  */
11190 static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev * pdev,pci_channel_state_t state)11191 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
11192 {
11193 	struct Scsi_Host *shost;
11194 	struct MPT3SAS_ADAPTER *ioc;
11195 
11196 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11197 		return PCI_ERS_RESULT_DISCONNECT;
11198 
11199 	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
11200 
11201 	switch (state) {
11202 	case pci_channel_io_normal:
11203 		return PCI_ERS_RESULT_CAN_RECOVER;
11204 	case pci_channel_io_frozen:
11205 		/* Fatal error, prepare for slot reset */
11206 		ioc->pci_error_recovery = 1;
11207 		scsi_block_requests(ioc->shost);
11208 		mpt3sas_base_stop_watchdog(ioc);
11209 		mpt3sas_base_free_resources(ioc);
11210 		return PCI_ERS_RESULT_NEED_RESET;
11211 	case pci_channel_io_perm_failure:
11212 		/* Permanent error, prepare for device removal */
11213 		ioc->pci_error_recovery = 1;
11214 		mpt3sas_base_stop_watchdog(ioc);
11215 		_scsih_flush_running_cmds(ioc);
11216 		return PCI_ERS_RESULT_DISCONNECT;
11217 	}
11218 	return PCI_ERS_RESULT_NEED_RESET;
11219 }
11220 
11221 /**
11222  * scsih_pci_slot_reset - Called when PCI slot has been reset.
11223  * @pdev: PCI device struct
11224  *
11225  * Description: This routine is called by the pci error recovery
11226  * code after the PCI slot has been reset, just before we
11227  * should resume normal operations.
11228  */
11229 static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev * pdev)11230 scsih_pci_slot_reset(struct pci_dev *pdev)
11231 {
11232 	struct Scsi_Host *shost;
11233 	struct MPT3SAS_ADAPTER *ioc;
11234 	int rc;
11235 
11236 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11237 		return PCI_ERS_RESULT_DISCONNECT;
11238 
11239 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
11240 
11241 	ioc->pci_error_recovery = 0;
11242 	ioc->pdev = pdev;
11243 	pci_restore_state(pdev);
11244 	rc = mpt3sas_base_map_resources(ioc);
11245 	if (rc)
11246 		return PCI_ERS_RESULT_DISCONNECT;
11247 
11248 	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
11249 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
11250 
11251 	ioc_warn(ioc, "hard reset: %s\n",
11252 		 (rc == 0) ? "success" : "failed");
11253 
11254 	if (!rc)
11255 		return PCI_ERS_RESULT_RECOVERED;
11256 	else
11257 		return PCI_ERS_RESULT_DISCONNECT;
11258 }
11259 
11260 /**
11261  * scsih_pci_resume() - resume normal ops after PCI reset
11262  * @pdev: pointer to PCI device
11263  *
11264  * Called when the error recovery driver tells us that its
11265  * OK to resume normal operation. Use completion to allow
11266  * halted scsi ops to resume.
11267  */
11268 static void
scsih_pci_resume(struct pci_dev * pdev)11269 scsih_pci_resume(struct pci_dev *pdev)
11270 {
11271 	struct Scsi_Host *shost;
11272 	struct MPT3SAS_ADAPTER *ioc;
11273 
11274 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11275 		return;
11276 
11277 	ioc_info(ioc, "PCI error: resume callback!!\n");
11278 
11279 	mpt3sas_base_start_watchdog(ioc);
11280 	scsi_unblock_requests(ioc->shost);
11281 }
11282 
11283 /**
11284  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
11285  * @pdev: pointer to PCI device
11286  */
11287 static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev * pdev)11288 scsih_pci_mmio_enabled(struct pci_dev *pdev)
11289 {
11290 	struct Scsi_Host *shost;
11291 	struct MPT3SAS_ADAPTER *ioc;
11292 
11293 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11294 		return PCI_ERS_RESULT_DISCONNECT;
11295 
11296 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
11297 
11298 	/* TODO - dump whatever for debugging purposes */
11299 
11300 	/* This called only if scsih_pci_error_detected returns
11301 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
11302 	 * works, no need to reset slot.
11303 	 */
11304 	return PCI_ERS_RESULT_RECOVERED;
11305 }
11306 
/**
 * scsih_ncq_prio_supp - Check for NCQ command priority support
 * @sdev: scsi device struct
 *
 * This is called when a user indicates they would like to enable
 * ncq command priorities. This works only on SATA devices.
 *
 * Return: true if the device reports NCQ priority support, false on
 * any failure (no VPD support, allocation failure, VPD read failure).
 */
bool scsih_ncq_prio_supp(struct scsi_device *sdev)
{
	unsigned char *buf;
	bool ncq_prio_supp = false;

	if (!scsi_device_supports_vpd(sdev))
		return ncq_prio_supp;

	buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
	if (!buf)
		return ncq_prio_supp;

	/* read VPD page 0x89 (ATA Information); bit 4 of byte 213 is
	 * presumably the NCQ priority capability bit mirrored from the
	 * ATA IDENTIFY data -- TODO confirm against the SAT spec.
	 */
	if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
		ncq_prio_supp = (buf[213] >> 4) & 1;

	kfree(buf);
	return ncq_prio_supp;
}
/*
 * The pci device ids are defined in mpi/mpi2_cnfg.h.
 * All entries use the LSI vendor id and match any subsystem ids;
 * _scsih_determine_hba_mpi_version() maps each device id to its
 * MPI generation at probe time.
 */
static const struct pci_device_id mpt3sas_pci_table[] = {
	/* Spitfire ~ 2004 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Fury ~ 3004 and 3008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Invader ~ 3108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Cutlass ~ 3216 and 3224 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Intruder ~ 3316 and 3324 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Aero SI 0x00E1 Configurable Secure
	 * 0x00E2 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 *  Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
	 *  (matched so _scsih_probe() can report and reject them)
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Atlas PCIe Switch Management Port */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Sea SI 0x00E5 Configurable Secure
	 * 0x00E6 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 *  Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
	 *  (matched so _scsih_probe() can report and reject them)
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	{0}     /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
11473 
/* PCI error recovery callbacks (see scsih_pci_* above) */
static struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected	= scsih_pci_error_detected,
	.mmio_enabled	= scsih_pci_mmio_enabled,
	.slot_reset	= scsih_pci_slot_reset,
	.resume		= scsih_pci_resume,
};
11480 
/* PCI driver entry points; registered in _mpt3sas_init() */
static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
#ifdef CONFIG_PM
	.suspend	= scsih_suspend,
	.resume		= scsih_resume,
#endif
};
11493 
/**
 * scsih_init - main entry point for this driver.
 *
 * Registers all message callback handlers with the base layer.  Each
 * registration returns an index that is later copied into every IOC
 * in _scsih_probe(), so the registration order below determines the
 * handle values -- do not reorder.
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	 /* queuecommand callback handler */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	/* target-reset (tm_tr), volume tm_tr and SAS I/O unit control
	 * completion handlers
	 */
	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_tr_complete);

	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_volume_tr_complete);

	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_sas_control_complete);

	mpt3sas_init_debugfs();
	return 0;
}
11544 
/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases every callback handler registered in scsih_init(), the
 * raid templates that were attached in _mpt3sas_init() (per the
 * hbas_to_enumerate policy), the SAS transport template, and the
 * debugfs entries.
 */
static void
scsih_exit(void)
{

	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt3sas_base_release_callback_handler(tm_cb_idx);
	mpt3sas_base_release_callback_handler(base_cb_idx);
	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
	mpt3sas_base_release_callback_handler(transport_cb_idx);
	mpt3sas_base_release_callback_handler(scsih_cb_idx);
	mpt3sas_base_release_callback_handler(config_cb_idx);
	mpt3sas_base_release_callback_handler(ctl_cb_idx);

	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);

/* raid transport support */
	if (hbas_to_enumerate != 1)
		raid_class_release(mpt3sas_raid_template);
	if (hbas_to_enumerate != 2)
		raid_class_release(mpt2sas_raid_template);
	sas_release_transport(mpt3sas_transport_template);
	mpt3sas_exit_debugfs();
}
11575 
11576 /**
11577  * _mpt3sas_init - main entry point for this driver.
11578  *
11579  * Return: 0 success, anything else error.
11580  */
11581 static int __init
_mpt3sas_init(void)11582 _mpt3sas_init(void)
11583 {
11584 	int error;
11585 
11586 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
11587 					MPT3SAS_DRIVER_VERSION);
11588 
11589 	mpt3sas_transport_template =
11590 	    sas_attach_transport(&mpt3sas_transport_functions);
11591 	if (!mpt3sas_transport_template)
11592 		return -ENODEV;
11593 
11594 	/* No need attach mpt3sas raid functions template
11595 	 * if hbas_to_enumarate value is one.
11596 	 */
11597 	if (hbas_to_enumerate != 1) {
11598 		mpt3sas_raid_template =
11599 				raid_class_attach(&mpt3sas_raid_functions);
11600 		if (!mpt3sas_raid_template) {
11601 			sas_release_transport(mpt3sas_transport_template);
11602 			return -ENODEV;
11603 		}
11604 	}
11605 
11606 	/* No need to attach mpt2sas raid functions template
11607 	 * if hbas_to_enumarate value is two
11608 	 */
11609 	if (hbas_to_enumerate != 2) {
11610 		mpt2sas_raid_template =
11611 				raid_class_attach(&mpt2sas_raid_functions);
11612 		if (!mpt2sas_raid_template) {
11613 			sas_release_transport(mpt3sas_transport_template);
11614 			return -ENODEV;
11615 		}
11616 	}
11617 
11618 	error = scsih_init();
11619 	if (error) {
11620 		scsih_exit();
11621 		return error;
11622 	}
11623 
11624 	mpt3sas_ctl_init(hbas_to_enumerate);
11625 
11626 	error = pci_register_driver(&mpt3sas_driver);
11627 	if (error) {
11628 		mpt3sas_ctl_exit(hbas_to_enumerate);
11629 		scsih_exit();
11630 	}
11631 
11632 	return error;
11633 }
11634 
/**
 * _mpt3sas_exit - exit point for this driver (when it is a module).
 *
 * Tears down in reverse of _mpt3sas_init(): ctl interface first,
 * then the PCI driver, then the scsih callbacks/templates.
 */
static void __exit
_mpt3sas_exit(void)
{
	pr_info("mpt3sas version %s unloading\n",
				MPT3SAS_DRIVER_VERSION);

	mpt3sas_ctl_exit(hbas_to_enumerate);

	pci_unregister_driver(&mpt3sas_driver);

	scsih_exit();
}
11651 
/* module load/unload entry points */
module_init(_mpt3sas_init);
module_exit(_mpt3sas_exit);
11654