// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/idr.h>

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static DEFINE_IDA(mrioc_ida);
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");
static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
	"Preferred max number of SG entries to be used for a single I/O\n"
	"The actual value will be determined by the driver\n"
	"(Minimum=256, Maximum=2048, default=256)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)

/*
 * SAS Log info code for an NCQ collateral abort after an NCQ error:
 * IOC_LOGINFO_PREFIX_PL | PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR
 * See: drivers/message/fusion/lsi/mpi_log_sas.h
 */
#define IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR	0x31080000

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

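	/*
	 * blk_mq_unique_tag() packs the hardware queue index into the upper
	 * bits and the per-queue tag into the lower bits (assuming the
	 * block layer's BLK_MQ_UNIQUE_TAG_BITS == 16 split): for example,
	 * hw queue 2 with tag 5 yields unique_tag 0x00020005, which the two
	 * helpers below decompose again.
	 */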
	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}

/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

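	/*
	 * Rebuild the block layer unique tag: undo the +1 offset applied in
	 * mpi3mr_host_tag_for_scmd() (done at the declaration above) and
	 * fold the queue index back into the upper bits so that
	 * scsi_host_find_tag() can locate the command.
	 */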
	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * as no longer in LLD scope.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

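/*
 * Firmware event reference counting at a glance: kref_init() in
 * mpi3mr_alloc_fwevt() sets the count to 1, and
 * mpi3mr_fwevt_add_to_list() takes one reference for the list and one
 * for the queued work, so a fully queued event holds three references
 * that are dropped as it is dequeued, processed and freed.
 */
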
/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: kref pointer of the firmware event
 *
 * Free firmware event memory when there are no references.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - kref incrementer
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - kref decrementer
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}

/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_hdb_trigger_data_event - Add hdb trigger data event to
 * the list
 * @mrioc: Adapter instance reference
 * @event_data: Event data
 *
 * Add the given hdb trigger data event to the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_hdb_trigger_data_event(struct mpi3mr_ioc *mrioc,
	struct trigger_event_data *event_data)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(*event_data);

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue hdb trigger data event\n");
		return;
	}

	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	memcpy(fwevt->event_data, event_data, sz);

	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}

/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}

/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() API for the
		 * fwevt work if the controller reset gets
		 * called as part of processing the same fwevt
		 * work, or when the worker thread is waiting
		 * for device add/remove APIs to complete.
		 * Otherwise we will see a deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread; the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttling and
	 * the QD has not been restored through a device info change event,
	 * then don't queue further reduction events.
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
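	/*
	 * Scale the firmware queue depth by qd_reduction tenths (for
	 * example, qd_reduction 3 with fw_qd 128 gives 38), but never let
	 * the modified depth drop below 8.
	 */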
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			tgt_priv->wslen = 0;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */

static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
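		/*
		 * Set the host byte of the result to DID_RESET (the host
		 * byte lives in bits 16-23 of scmd->result) so the SCSI
		 * midlayer sees the command as terminated by a reset and
		 * can retry it.
		 */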
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host; if the command is pending in the LLD for the specific
 * device (lun), the device-specific pending I/O counter in the
 * device structure is incremented.
 *
 * Return: true always.
 */

static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host; if the command is pending in the LLD for the specific
 * target, the target-specific pending I/O counter in the target
 * structure is incremented.
 *
 * Return: true always.
 */

static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}

/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdevice to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdevice from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
		if (!list_empty(&tgtdev->list)) {
			list_del_init(&tgtdev->list);
			tgtdev->state = MPI3MR_DEV_DELETED;
			mpi3mr_tgtdev_put(tgtdev);
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev  *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persist ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev  *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev  *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set the io_divert flag for each device associated
 * with the given throttle group to the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 *					device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
	bool device_add)
{
	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
	    (device_add ? "addition" : "removal"));
	ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
	ioc_notice(mrioc, "are matched with attached devices for correctness\n");
}

/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to upper layers and, if
 * it is, removes the device from upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		atomic_set(&tgt_priv->block_io, 0);
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		if (tgtdev->starget) {
			if (mrioc->current_event)
				mrioc->current_event->pending_at_sml = 1;
			scsi_remove_target(&tgtdev->starget->dev);
			tgtdev->host_exposed = 0;
			if (mrioc->current_event) {
				mrioc->current_event->pending_at_sml = 0;
				if (mrioc->current_event->discard) {
					mpi3mr_print_device_event_notice(mrioc,
					    false);
					return;
				}
			}
		}
	} else
		mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
	mpi3mr_global_trigger(mrioc,
	    MPI3_DRIVER2_GLOBALTRIGGER_DEVICE_REMOVAL_ENABLED);

	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}

/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to upper layers and,
 * if it is not already exposed, exposes the device to upper
 * layers by calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}

/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}

static void mpi3mr_configure_nvme_dev(struct mpi3mr_tgt_dev *tgt_dev,
		struct queue_limits *lim)
{
	u8 pgsz = tgt_dev->dev_spec.pcie_inf.pgsz ? : MPI3MR_DEFAULT_PGSZEXP;

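	/*
	 * mdts is in bytes, so dividing by 512 gives the limit in 512-byte
	 * sectors; the virt boundary of (1 << pgsz) - 1 keeps scatter/gather
	 * elements aligned to the device page size reported in device page0.
	 */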
	lim->max_hw_sectors = tgt_dev->dev_spec.pcie_inf.mdts / 512;
	lim->virt_boundary_mask = (1 << pgsz) - 1;
}

static void mpi3mr_configure_tgt_dev(struct mpi3mr_tgt_dev *tgt_dev,
		struct queue_limits *lim)
{
	if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE &&
	    (tgt_dev->dev_spec.pcie_inf.dev_info &
	     MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
			MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
		mpi3mr_configure_nvme_dev(tgt_dev, lim);
}

/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct queue_limits lim;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);

	lim = queue_limits_start_update(sdev->request_queue);
	mpi3mr_configure_tgt_dev(tgtdev, &lim);
	WARN_ON_ONCE(queue_limits_commit_update(sdev->request_queue, &lim));
}

/**
 * mpi3mr_refresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * devices that went missing during the reset and remove them
 * from the upper layers, and to expose any newly detected
 * devices to the upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_refresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	dprint_reset(mrioc, "refresh target devices: check for removals\n");
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) ||
		     tgtdev->is_hidden) &&
		     tgtdev->host_exposed && tgtdev->starget &&
		     tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_removed = 1;
			atomic_set(&tgt_priv->block_io, 0);
		}
	}

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		} else if (tgtdev->is_hidden && tgtdev->host_exposed) {
			dprint_reset(mrioc, "hiding target device with perst_id(%d)\n",
				     tgtdev->perst_id);
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden) {
			if (!tgtdev->host_exposed)
				mpi3mr_report_tgtdev_to_host(mrioc,
							     tgtdev->perst_id);
			else if (tgtdev->starget)
				starget_for_each_device(tgtdev->starget,
							(void *)tgtdev, mpi3mr_update_sdev);
		}
	}
}

/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->wslen = 0;
		break;
	}

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
		scsi_tgt_priv_data->wslen = tgtdev->wslen;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;

		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
		    && (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			     MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
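		/*
		 * The throttle limits from device page0 are scaled by 2048;
		 * assuming the page0 values are in MiB, this converts them
		 * to counts of 512-byte blocks (2048 * 512 B = 1 MiB).
		 */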
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}

/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}

	if (delete)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	ioc_info(mrioc,
	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
	    __func__, dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}
1453 
1454 /**
1455  * mpi3mr_enclosure_find_by_handle - enclosure search by handle
1456  * @mrioc: Adapter instance reference
1457  * @handle: Firmware device handle of the enclosure
1458  *
1459  * This searches for enclosure device based on handle, then returns the
1460  * enclosure object.
1461  *
1462  * Return: Enclosure object reference or NULL
1463  */
mpi3mr_enclosure_find_by_handle(struct mpi3mr_ioc * mrioc,u16 handle)1464 struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
1465 	struct mpi3mr_ioc *mrioc, u16 handle)
1466 {
1467 	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;
1468 
1469 	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
1470 		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
1471 			continue;
1472 		r = enclosure_dev;
1473 		goto out;
1474 	}
1475 out:
1476 	return r;
1477 }
1478 
1479 /**
1480  * mpi3mr_process_trigger_data_event_bh - Process trigger event
1481  * data
1482  * @mrioc: Adapter instance reference
1483  * @event_data: Event data
1484  *
1485  * This function releases diage buffers or issues diag fault
1486  * based on trigger conditions
1487  *
1488  * Return: Nothing
1489  */
mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc * mrioc,struct trigger_event_data * event_data)1490 static void mpi3mr_process_trigger_data_event_bh(struct mpi3mr_ioc *mrioc,
1491 	struct trigger_event_data *event_data)
1492 {
1493 	struct diag_buffer_desc *trace_hdb = event_data->trace_hdb;
1494 	struct diag_buffer_desc *fw_hdb = event_data->fw_hdb;
1495 	unsigned long flags;
1496 	int retval = 0;
1497 	u8 trigger_type = event_data->trigger_type;
1498 	union mpi3mr_trigger_data *trigger_data =
1499 		&event_data->trigger_specific_data;
1500 
1501 	if (event_data->snapdump)  {
1502 		if (trace_hdb)
1503 			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
1504 			    trigger_data, 1);
1505 		if (fw_hdb)
1506 			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
1507 			    trigger_data, 1);
1508 		mpi3mr_soft_reset_handler(mrioc,
1509 			    MPI3MR_RESET_FROM_TRIGGER, 1);
1510 		return;
1511 	}
1512 
1513 	if (trace_hdb) {
1514 		retval = mpi3mr_issue_diag_buf_release(mrioc, trace_hdb);
1515 		if (!retval) {
1516 			mpi3mr_set_trigger_data_in_hdb(trace_hdb, trigger_type,
1517 			    trigger_data, 1);
1518 		}
1519 		spin_lock_irqsave(&mrioc->trigger_lock, flags);
1520 		mrioc->trace_release_trigger_active = false;
1521 		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
1522 	}
1523 	if (fw_hdb) {
1524 		retval = mpi3mr_issue_diag_buf_release(mrioc, fw_hdb);
1525 		if (!retval) {
1526 			mpi3mr_set_trigger_data_in_hdb(fw_hdb, trigger_type,
1527 		    trigger_data, 1);
1528 		}
1529 		spin_lock_irqsave(&mrioc->trigger_lock, flags);
1530 		mrioc->fw_release_trigger_active = false;
1531 		spin_unlock_irqrestore(&mrioc->trigger_lock, flags);
1532 	}
1533 }
1534 
1535 /**
1536  * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
1537  * @mrioc: Adapter instance reference
1538  * @encl_pg0: Enclosure page 0.
1539  * @is_added: Added event or not
1540  *
1541  * Return nothing.
1542  */
mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc * mrioc,struct mpi3_enclosure_page0 * encl_pg0,u8 is_added)1543 static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
1544 	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
1545 {
1546 	char *reason_str = NULL;
1547 
1548 	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
1549 		return;
1550 
1551 	if (is_added)
1552 		reason_str = "enclosure added";
1553 	else
1554 		reason_str = "enclosure dev status changed";
1555 
1556 	ioc_info(mrioc,
1557 	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
1558 	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
1559 	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
1560 	ioc_info(mrioc,
1561 	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
1562 	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
1563 	    le16_to_cpu(encl_pg0->flags),
1564 	    ((le16_to_cpu(encl_pg0->flags) &
1565 	      MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
1566 }
1567 
1568 /**
1569  * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
1570  * @mrioc: Adapter instance reference
1571  * @fwevt: Firmware event reference
1572  *
1573  * Prints information about the Enclosure device status or
1574  * Enclosure add events if logging is enabled and add or remove
1575  * the enclosure from the controller's internal list of
1576  * enclosures.
1577  *
1578  * Return: Nothing.
1579  */
mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc * mrioc,struct mpi3mr_fwevt * fwevt)1580 static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
1581 	struct mpi3mr_fwevt *fwevt)
1582 {
1583 	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
1584 	struct mpi3_enclosure_page0 *encl_pg0;
1585 	u16 encl_handle;
1586 	u8 added, present;
1587 
1588 	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
1589 	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
1590 	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);
1591 
1592 
1593 	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
1594 	present = ((le16_to_cpu(encl_pg0->flags) &
1595 	      MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);
1596 
1597 	if (encl_handle)
1598 		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
1599 		    encl_handle);
1600 	if (!enclosure_dev && present) {
1601 		enclosure_dev =
1602 			kzalloc(sizeof(struct mpi3mr_enclosure_node),
1603 			    GFP_KERNEL);
1604 		if (!enclosure_dev)
1605 			return;
1606 		list_add_tail(&enclosure_dev->list,
1607 		    &mrioc->enclosure_list);
1608 	}
1609 	if (enclosure_dev) {
1610 		if (!present) {
1611 			list_del(&enclosure_dev->list);
1612 			kfree(enclosure_dev);
1613 		} else
1614 			memcpy(&enclosure_dev->pg0, encl_pg0,
1615 			    sizeof(enclosure_dev->pg0));
1616 
1617 	}
1618 }
1619 
1620 /**
1621  * mpi3mr_sastopochg_evt_debug - SASTopoChange details
1622  * @mrioc: Adapter instance reference
1623  * @event_data: SAS topology change list event data
1624  *
1625  * Prints information about the SAS topology change event.
1626  *
1627  * Return: Nothing.
1628  */
1629 static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc * mrioc,struct mpi3_event_data_sas_topology_change_list * event_data)1630 mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1631 	struct mpi3_event_data_sas_topology_change_list *event_data)
1632 {
1633 	int i;
1634 	u16 handle;
1635 	u8 reason_code, phy_number;
1636 	char *status_str = NULL;
1637 	u8 link_rate, prev_link_rate;
1638 
1639 	switch (event_data->exp_status) {
1640 	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
1641 		status_str = "remove";
1642 		break;
1643 	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
1644 		status_str =  "responding";
1645 		break;
1646 	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
1647 		status_str = "remove delay";
1648 		break;
1649 	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
1650 		status_str = "direct attached";
1651 		break;
1652 	default:
1653 		status_str = "unknown status";
1654 		break;
1655 	}
1656 	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
1657 	    __func__, status_str);
1658 	ioc_info(mrioc,
1659 	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
1660 	    __func__, le16_to_cpu(event_data->expander_dev_handle),
1661 	    event_data->io_unit_port,
1662 	    le16_to_cpu(event_data->enclosure_handle),
1663 	    event_data->start_phy_num, event_data->num_entries);
1664 	for (i = 0; i < event_data->num_entries; i++) {
1665 		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
1666 		if (!handle)
1667 			continue;
1668 		phy_number = event_data->start_phy_num + i;
1669 		reason_code = event_data->phy_entry[i].status &
1670 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1671 		switch (reason_code) {
1672 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1673 			status_str = "target remove";
1674 			break;
1675 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1676 			status_str = "delay target remove";
1677 			break;
1678 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1679 			status_str = "link status change";
1680 			break;
1681 		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1682 			status_str = "link status no change";
1683 			break;
1684 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
1685 			status_str = "target responding";
1686 			break;
1687 		default:
1688 			status_str = "unknown";
1689 			break;
1690 		}
1691 		link_rate = event_data->phy_entry[i].link_rate >> 4;
1692 		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
1693 		ioc_info(mrioc,
1694 		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1695 		    __func__, phy_number, handle, status_str, link_rate,
1696 		    prev_link_rate);
1697 	}
1698 }
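/*
 * The link_rate byte printed above packs two 4-bit fields: the upper
 * nibble is the newly negotiated rate and the lower nibble the
 * previous rate. For example, a raw value of 0xA9 decodes as:
 *
 *	u8 new_rate  = 0xA9 >> 4;	(0x0A, current link rate)
 *	u8 prev_rate = 0xA9 & 0xF;	(0x09, previous link rate)
 */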
1699 
1700 /**
1701  * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
1702  * @mrioc: Adapter instance reference
1703  * @fwevt: Firmware event reference
1704  *
1705  * Prints information about the SAS topology change event and
1706  * for "not responding" event code, removes the device from the
1707  * upper layers.
1708  *
1709  * Return: Nothing.
1710  */
1711 static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
1712 	struct mpi3mr_fwevt *fwevt)
1713 {
1714 	struct mpi3_event_data_sas_topology_change_list *event_data =
1715 	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
1716 	int i;
1717 	u16 handle;
1718 	u8 reason_code;
1719 	u64 exp_sas_address = 0, parent_sas_address = 0;
1720 	struct mpi3mr_hba_port *hba_port = NULL;
1721 	struct mpi3mr_tgt_dev *tgtdev = NULL;
1722 	struct mpi3mr_sas_node *sas_expander = NULL;
1723 	unsigned long flags;
1724 	u8 link_rate, prev_link_rate, parent_phy_number;
1725 
1726 	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
1727 	if (mrioc->sas_transport_enabled) {
1728 		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
1729 		    event_data->io_unit_port);
1730 		if (le16_to_cpu(event_data->expander_dev_handle)) {
1731 			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
1732 			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
1733 			    le16_to_cpu(event_data->expander_dev_handle));
1734 			if (sas_expander) {
1735 				exp_sas_address = sas_expander->sas_address;
1736 				hba_port = sas_expander->hba_port;
1737 			}
1738 			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
1739 			parent_sas_address = exp_sas_address;
1740 		} else
1741 			parent_sas_address = mrioc->sas_hba.sas_address;
1742 	}
1743 
1744 	for (i = 0; i < event_data->num_entries; i++) {
1745 		if (fwevt->discard)
1746 			return;
1747 		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
1748 		if (!handle)
1749 			continue;
1750 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1751 		if (!tgtdev)
1752 			continue;
1753 
1754 		reason_code = event_data->phy_entry[i].status &
1755 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1756 
1757 		switch (reason_code) {
1758 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1759 			if (tgtdev->host_exposed)
1760 				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1761 			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
1762 			mpi3mr_tgtdev_put(tgtdev);
1763 			break;
1764 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
1765 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1766 		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1767 		{
1768 			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
1769 			    || tgtdev->is_hidden)
1770 				break;
1771 			link_rate = event_data->phy_entry[i].link_rate >> 4;
1772 			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
1773 			if (link_rate == prev_link_rate)
1774 				break;
1775 			if (!parent_sas_address)
1776 				break;
1777 			parent_phy_number = event_data->start_phy_num + i;
1778 			mpi3mr_update_links(mrioc, parent_sas_address, handle,
1779 			    parent_phy_number, link_rate, hba_port);
1780 			break;
1781 		}
1782 		default:
1783 			break;
1784 		}
1785 		if (tgtdev)
1786 			mpi3mr_tgtdev_put(tgtdev);
1787 	}
1788 
1789 	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
1790 	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
1791 		if (sas_expander)
1792 			mpi3mr_expander_remove(mrioc, exp_sas_address,
1793 			    hba_port);
1794 	}
1795 }
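/*
 * Reference discipline in the loop above: every successful
 * mpi3mr_get_tgtdev_by_handle() must be balanced by
 * mpi3mr_tgtdev_put() on each path out of the iteration. The
 * "not responding" arm issues an additional put after delisting,
 * which appears to release the reference held by the driver's
 * tgtdev list. A condensed sketch of the per-entry pattern:
 *
 *	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
 *	if (!tgtdev)
 *		continue;
 *	... act on tgtdev ...
 *	mpi3mr_tgtdev_put(tgtdev);
 */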
1796 
1797 /**
1798  * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
1799  * @mrioc: Adapter instance reference
1800  * @event_data: PCIe topology change list event data
1801  *
1802  * Prints information about the PCIe topology change event.
1803  *
1804  * Return: Nothing.
1805  */
1806 static void
1807 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1808 	struct mpi3_event_data_pcie_topology_change_list *event_data)
1809 {
1810 	int i;
1811 	u16 handle;
1812 	u16 reason_code;
1813 	u8 port_number;
1814 	char *status_str = NULL;
1815 	u8 link_rate, prev_link_rate;
1816 
1817 	switch (event_data->switch_status) {
1818 	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
1819 		status_str = "remove";
1820 		break;
1821 	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
1822 		status_str =  "responding";
1823 		break;
1824 	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
1825 		status_str = "remove delay";
1826 		break;
1827 	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
1828 		status_str = "direct attached";
1829 		break;
1830 	default:
1831 		status_str = "unknown status";
1832 		break;
1833 	}
1834 	ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
1835 	    __func__, status_str);
1836 	ioc_info(mrioc,
1837 	    "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
1838 	    __func__, le16_to_cpu(event_data->switch_dev_handle),
1839 	    le16_to_cpu(event_data->enclosure_handle),
1840 	    event_data->start_port_num, event_data->num_entries);
1841 	for (i = 0; i < event_data->num_entries; i++) {
1842 		handle =
1843 		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1844 		if (!handle)
1845 			continue;
1846 		port_number = event_data->start_port_num + i;
1847 		reason_code = event_data->port_entry[i].port_status;
1848 		switch (reason_code) {
1849 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1850 			status_str = "target remove";
1851 			break;
1852 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1853 			status_str = "delay target remove";
1854 			break;
1855 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1856 			status_str = "link status change";
1857 			break;
1858 		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
1859 			status_str = "link status no change";
1860 			break;
1861 		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
1862 			status_str = "target responding";
1863 			break;
1864 		default:
1865 			status_str = "unknown";
1866 			break;
1867 		}
1868 		link_rate = event_data->port_entry[i].current_port_info &
1869 		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1870 		prev_link_rate = event_data->port_entry[i].previous_port_info &
1871 		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1872 		ioc_info(mrioc,
1873 		    "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1874 		    __func__, port_number, handle, status_str, link_rate,
1875 		    prev_link_rate);
1876 	}
1877 }
1878 
1879 /**
1880  * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
1881  * @mrioc: Adapter instance reference
1882  * @fwevt: Firmware event reference
1883  *
1884  * Prints information about the PCIe topology change event and
1885  * for "not responding" event code, removes the device from the
1886  * upper layers.
1887  *
1888  * Return: Nothing.
1889  */
1890 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
1891 	struct mpi3mr_fwevt *fwevt)
1892 {
1893 	struct mpi3_event_data_pcie_topology_change_list *event_data =
1894 	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
1895 	int i;
1896 	u16 handle;
1897 	u8 reason_code;
1898 	struct mpi3mr_tgt_dev *tgtdev = NULL;
1899 
1900 	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
1901 
1902 	for (i = 0; i < event_data->num_entries; i++) {
1903 		if (fwevt->discard)
1904 			return;
1905 		handle =
1906 		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1907 		if (!handle)
1908 			continue;
1909 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1910 		if (!tgtdev)
1911 			continue;
1912 
1913 		reason_code = event_data->port_entry[i].port_status;
1914 
1915 		switch (reason_code) {
1916 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1917 			if (tgtdev->host_exposed)
1918 				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1919 			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
1920 			mpi3mr_tgtdev_put(tgtdev);
1921 			break;
1922 		default:
1923 			break;
1924 		}
1925 		if (tgtdev)
1926 			mpi3mr_tgtdev_put(tgtdev);
1927 	}
1928 }
1929 
1930 /**
1931  * mpi3mr_logdata_evt_bh -  Log data event bottomhalf
1932  * @mrioc: Adapter instance reference
1933  * @fwevt: Firmware event reference
1934  *
1935  * Extracts the event data and calls application interfacing
1936  * function to process the event further.
1937  *
1938  * Return: Nothing.
1939  */
1940 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
1941 	struct mpi3mr_fwevt *fwevt)
1942 {
1943 	mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
1944 	    fwevt->event_data_size);
1945 }
1946 
1947 /**
1948  * mpi3mr_update_sdev_qd - Update SCSI device queue depth
1949  * @sdev: SCSI device reference
1950  * @data: Queue depth reference
1951  *
1952  * This is an iterator function called for each SCSI device in a
1953  * target to update the QD of each SCSI device.
1954  *
1955  * Return: Nothing.
1956  */
1957 static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
1958 {
1959 	u16 *q_depth = (u16 *)data;
1960 
1961 	scsi_change_queue_depth(sdev, (int)*q_depth);
1962 	sdev->max_queue_depth = sdev->queue_depth;
1963 }
1964 
1965 /**
1966  * mpi3mr_set_qd_for_all_vd_in_tg - set QD for TG VDs
1967  * @mrioc: Adapter instance reference
1968  * @tg: Throttle group information pointer
1969  *
1970  * Accessor to reduce QD for each device associated with the
1971  * given throttle group.
1972  *
1973  * Return: None.
1974  */
1975 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
1976 	struct mpi3mr_throttle_group_info *tg)
1977 {
1978 	unsigned long flags;
1979 	struct mpi3mr_tgt_dev *tgtdev;
1980 	struct mpi3mr_stgt_priv_data *tgt_priv;
1981 
1982 
1983 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
1984 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
1985 		if (tgtdev->starget && tgtdev->starget->hostdata) {
1986 			tgt_priv = tgtdev->starget->hostdata;
1987 			if (tgt_priv->throttle_group == tg) {
1988 				dprint_event_bh(mrioc,
1989 				    "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
1990 				    tgt_priv->perst_id, tgtdev->q_depth,
1991 				    tg->modified_qd);
1992 				starget_for_each_device(tgtdev->starget,
1993 				    (void *)&tg->modified_qd,
1994 				    mpi3mr_update_sdev_qd);
1995 			}
1996 		}
1997 	}
1998 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
1999 }
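/*
 * starget_for_each_device() is the SCSI midlayer iterator: it invokes
 * the callback once per scsi_device under the target, passing the
 * opaque data pointer through. A minimal usage sketch with the
 * callback defined above (the queue depth value is illustrative):
 *
 *	u16 new_qd = 32;
 *
 *	starget_for_each_device(tgtdev->starget, &new_qd,
 *	    mpi3mr_update_sdev_qd);
 */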
2000 
2001 /**
2002  * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
2003  * @mrioc: Adapter instance reference
2004  * @fwevt: Firmware event reference
2005  *
2006  * Identifies the firmware event and calls the corresponding bottom
2007  * half handler and sends event acknowledgment if required.
2008  *
2009  * Return: Nothing.
2010  */
2011 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
2012 	struct mpi3mr_fwevt *fwevt)
2013 {
2014 	struct mpi3_device_page0 *dev_pg0 = NULL;
2015 	u16 perst_id, handle, dev_info;
2016 	struct mpi3_device0_sas_sata_format *sasinf = NULL;
2017 	unsigned int timeout;
2018 
2019 	mpi3mr_fwevt_del_from_list(mrioc, fwevt);
2020 	mrioc->current_event = fwevt;
2021 
2022 	if (mrioc->stop_drv_processing)
2023 		goto out;
2024 
2025 	if (mrioc->unrecoverable) {
2026 		dprint_event_bh(mrioc,
2027 		    "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
2028 		    fwevt->event_id);
2029 		goto out;
2030 	}
2031 
2032 	if (!fwevt->process_evt)
2033 		goto evt_ack;
2034 
2035 	switch (fwevt->event_id) {
2036 	case MPI3_EVENT_DEVICE_ADDED:
2037 	{
2038 		dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
2039 		perst_id = le16_to_cpu(dev_pg0->persistent_id);
2040 		handle = le16_to_cpu(dev_pg0->dev_handle);
2041 		if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
2042 			mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
2043 		else if (mrioc->sas_transport_enabled &&
2044 		    (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
2045 			sasinf = &dev_pg0->device_specific.sas_sata_format;
2046 			dev_info = le16_to_cpu(sasinf->device_info);
2047 			if (!mrioc->sas_hba.num_phys)
2048 				mpi3mr_sas_host_add(mrioc);
2049 			else
2050 				mpi3mr_sas_host_refresh(mrioc);
2051 
2052 			if (mpi3mr_is_expander_device(dev_info))
2053 				mpi3mr_expander_add(mrioc, handle);
2054 		}
2055 		break;
2056 	}
2057 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
2058 	{
2059 		dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
2060 		perst_id = le16_to_cpu(dev_pg0->persistent_id);
2061 		if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
2062 			mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
2063 		break;
2064 	}
2065 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
2066 	{
2067 		mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
2068 		break;
2069 	}
2070 	case MPI3_EVENT_ENCL_DEVICE_ADDED:
2071 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
2072 	{
2073 		mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
2074 		break;
2075 	}
2076 
2077 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
2078 	{
2079 		mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
2080 		break;
2081 	}
2082 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
2083 	{
2084 		mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
2085 		break;
2086 	}
2087 	case MPI3_EVENT_LOG_DATA:
2088 	{
2089 		mpi3mr_logdata_evt_bh(mrioc, fwevt);
2090 		break;
2091 	}
2092 	case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
2093 	{
2094 		struct mpi3mr_throttle_group_info *tg;
2095 
2096 		tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
2097 		dprint_event_bh(mrioc,
2098 		    "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
2099 		    tg->id, tg->need_qd_reduction);
2100 		if (tg->need_qd_reduction) {
2101 			mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
2102 			tg->need_qd_reduction = 0;
2103 		}
2104 		break;
2105 	}
2106 	case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
2107 	{
2108 		timeout = MPI3MR_RESET_TIMEOUT * 2;
2109 		while ((mrioc->device_refresh_on || mrioc->block_on_pci_err) &&
2110 		    !mrioc->unrecoverable && !mrioc->pci_err_recovery) {
2111 			msleep(500);
2112 			if (!timeout--) {
2113 				mrioc->unrecoverable = 1;
2114 				break;
2115 			}
2116 		}
2117 
2118 		if (mrioc->unrecoverable || mrioc->pci_err_recovery)
2119 			break;
2120 
2121 		dprint_event_bh(mrioc,
2122 		    "scan for non responding and newly added devices after soft reset started\n");
2123 		if (mrioc->sas_transport_enabled) {
2124 			mpi3mr_refresh_sas_ports(mrioc);
2125 			mpi3mr_refresh_expanders(mrioc);
2126 		}
2127 		mpi3mr_refresh_tgtdevs(mrioc);
2128 		ioc_info(mrioc,
2129 		    "scan for non responding and newly added devices after soft reset completed\n");
2130 		break;
2131 	}
2132 	case MPI3MR_DRIVER_EVENT_PROCESS_TRIGGER:
2133 	{
2134 		mpi3mr_process_trigger_data_event_bh(mrioc,
2135 		    (struct trigger_event_data *)fwevt->event_data);
2136 		break;
2137 	}
2138 	default:
2139 		break;
2140 	}
2141 
2142 evt_ack:
2143 	if (fwevt->send_ack)
2144 		mpi3mr_process_event_ack(mrioc, fwevt->event_id,
2145 		    fwevt->evt_ctx);
2146 out:
2147 	/* Put fwevt reference count to neutralize kref_init increment */
2148 	mpi3mr_fwevt_put(fwevt);
2149 	mrioc->current_event = NULL;
2150 }
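/*
 * Note on the MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH arm above: the
 * wait is a polled loop that sleeps 500 ms per iteration, so a counter
 * of MPI3MR_RESET_TIMEOUT * 2 half-second ticks budgets roughly
 * MPI3MR_RESET_TIMEOUT seconds before the controller is declared
 * unrecoverable.
 */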
2151 
2152 /**
2153  * mpi3mr_fwevt_worker - Firmware event worker
2154  * @work: Work struct containing firmware event
2155  *
2156  * Extracts the firmware event and calls mpi3mr_fwevt_bh.
2157  *
2158  * Return: Nothing.
2159  */
2160 static void mpi3mr_fwevt_worker(struct work_struct *work)
2161 {
2162 	struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
2163 	    work);
2164 	mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
2165 	/*
2166 	 * Put fwevt reference count after
2167 	 * dequeuing it from worker queue
2168 	 */
2169 	mpi3mr_fwevt_put(fwevt);
2170 }
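/*
 * Pairing sketch for the worker above: when an event is queued for
 * bottom-half processing (see mpi3mr_fwevt_add_to_list()), the fwevt
 * is typically referenced and scheduled along these lines (hedged;
 * field and workqueue names follow the rest of this driver and may
 * differ at the actual call site):
 *
 *	kref_get(&fwevt->ref_count);
 *	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
 *	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
 */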
2171 
2172 /**
2173  * mpi3mr_create_tgtdev - Create and add a target device
2174  * @mrioc: Adapter instance reference
2175  * @dev_pg0: Device Page 0 data
2176  *
2177  * If the device specified by the device page 0 data is not
2178  * present in the driver's internal list, allocate the memory
2179  * for the device, populate the data and add to the list, else
2180  * update the device data.  The key is persistent ID.
2181  *
2182  * Return: 0 on success, -ENOMEM on memory allocation failure
2183  */
2184 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
2185 	struct mpi3_device_page0 *dev_pg0)
2186 {
2187 	int retval = 0;
2188 	struct mpi3mr_tgt_dev *tgtdev = NULL;
2189 	u16 perst_id = 0;
2190 	unsigned long flags;
2191 
2192 	perst_id = le16_to_cpu(dev_pg0->persistent_id);
2193 	if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
2194 		return retval;
2195 
2196 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2197 	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
2198 	if (tgtdev)
2199 		tgtdev->state = MPI3MR_DEV_CREATED;
2200 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2201 
2202 	if (tgtdev) {
2203 		mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2204 		mpi3mr_tgtdev_put(tgtdev);
2205 	} else {
2206 		tgtdev = mpi3mr_alloc_tgtdev();
2207 		if (!tgtdev)
2208 			return -ENOMEM;
2209 		mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2210 		mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
2211 	}
2212 
2213 	return retval;
2214 }
2215 
2216 /**
2217  * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
2218  * @mrioc: Adapter instance reference
2219  *
2220  * Flush pending commands in the delayed lists due to a
2221  * controller reset or driver removal as a cleanup.
2222  *
2223  * Return: Nothing
2224  */
2225 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
2226 {
2227 	struct delayed_dev_rmhs_node *_rmhs_node;
2228 	struct delayed_evt_ack_node *_evtack_node;
2229 
2230 	dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
2231 	while (!list_empty(&mrioc->delayed_rmhs_list)) {
2232 		_rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
2233 		    struct delayed_dev_rmhs_node, list);
2234 		list_del(&_rmhs_node->list);
2235 		kfree(_rmhs_node);
2236 	}
2237 	dprint_reset(mrioc, "flushing delayed event ack commands\n");
2238 	while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2239 		_evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
2240 		    struct delayed_evt_ack_node, list);
2241 		list_del(&_evtack_node->list);
2242 		kfree(_evtack_node);
2243 	}
2244 }
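/*
 * The drain loops above pop the list head until empty; an equivalent
 * formulation using the safe iterator would be:
 *
 *	struct delayed_dev_rmhs_node *node, *tmp;
 *
 *	list_for_each_entry_safe(node, tmp, &mrioc->delayed_rmhs_list,
 *	    list) {
 *		list_del(&node->list);
 *		kfree(node);
 *	}
 */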
2245 
2246 /**
2247  * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
2248  * @mrioc: Adapter instance reference
2249  * @drv_cmd: Internal command tracker
2250  *
2251  * Issues a target reset TM to the firmware from the device
2252  * removal TM pend list or retries the removal handshake sequence
2253  * based on the IOU control request IOC status.
2254  *
2255  * Return: Nothing
2256  */
2257 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
2258 	struct mpi3mr_drv_cmd *drv_cmd)
2259 {
2260 	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2261 	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2262 
2263 	if (drv_cmd->state & MPI3MR_CMD_RESET)
2264 		goto clear_drv_cmd;
2265 
2266 	ioc_info(mrioc,
2267 	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
2268 	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
2269 	    drv_cmd->ioc_loginfo);
2270 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2271 		if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
2272 			drv_cmd->retry_count++;
2273 			ioc_info(mrioc,
2274 			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
2275 			    __func__, drv_cmd->dev_handle,
2276 			    drv_cmd->retry_count);
2277 			mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
2278 			    drv_cmd, drv_cmd->iou_rc);
2279 			return;
2280 		}
2281 		ioc_err(mrioc,
2282 		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
2283 		    __func__, drv_cmd->dev_handle);
2284 	} else {
2285 		ioc_info(mrioc,
2286 		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
2287 		    __func__, drv_cmd->dev_handle);
2288 		clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
2289 	}
2290 
2291 	if (!list_empty(&mrioc->delayed_rmhs_list)) {
2292 		delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
2293 		    struct delayed_dev_rmhs_node, list);
2294 		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
2295 		drv_cmd->retry_count = 0;
2296 		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
2297 		ioc_info(mrioc,
2298 		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
2299 		    __func__, drv_cmd->dev_handle);
2300 		mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
2301 		    drv_cmd->iou_rc);
2302 		list_del(&delayed_dev_rmhs->list);
2303 		kfree(delayed_dev_rmhs);
2304 		return;
2305 	}
2306 
2307 clear_drv_cmd:
2308 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2309 	drv_cmd->callback = NULL;
2310 	drv_cmd->retry_count = 0;
2311 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2312 	clear_bit(cmd_idx, mrioc->devrem_bitmap);
2313 }
2314 
2315 /**
2316  * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
2317  * @mrioc: Adapter instance reference
2318  * @drv_cmd: Internal command tracker
2319  *
2320  * Issues a target reset TM to the firmware from the device
2321  * removal TM pend list or issues an IO unit control request as
2322  * part of device removal or hidden acknowledgment handshake.
2323  *
2324  * Return: Nothing
2325  */
2326 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
2327 	struct mpi3mr_drv_cmd *drv_cmd)
2328 {
2329 	struct mpi3_iounit_control_request iou_ctrl;
2330 	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2331 	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
2332 	int retval;
2333 
2334 	if (drv_cmd->state & MPI3MR_CMD_RESET)
2335 		goto clear_drv_cmd;
2336 
2337 	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
2338 		tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
2339 
2340 	if (tm_reply)
2341 		pr_info(IOCNAME
2342 		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
2343 		    mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
2344 		    drv_cmd->ioc_loginfo,
2345 		    le32_to_cpu(tm_reply->termination_count));
2346 
2347 	pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
2348 	    mrioc->name, drv_cmd->dev_handle, cmd_idx);
2349 
2350 	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2351 
2352 	drv_cmd->state = MPI3MR_CMD_PENDING;
2353 	drv_cmd->is_waiting = 0;
2354 	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
2355 	iou_ctrl.operation = drv_cmd->iou_rc;
2356 	iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
2357 	iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
2358 	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2359 
2360 	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
2361 	    1);
2362 	if (retval) {
2363 		pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
2364 		    mrioc->name);
2365 		goto clear_drv_cmd;
2366 	}
2367 
2368 	return;
2369 clear_drv_cmd:
2370 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2371 	drv_cmd->callback = NULL;
2372 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2373 	drv_cmd->retry_count = 0;
2374 	clear_bit(cmd_idx, mrioc->devrem_bitmap);
2375 }
2376 
2377 /**
2378  * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
2379  * @mrioc: Adapter instance reference
2380  * @handle: Device handle
2381  * @cmdparam: Internal command tracker
2382  * @iou_rc: IO unit reason code
2383  *
2384  * Issues a target reset TM to the firmware or adds it to a pend
2385  * list as part of device removal or hidden acknowledgment
2386  * handshake.
2387  *
2388  * Return: Nothing
2389  */
2390 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
2391 	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
2392 {
2393 	struct mpi3_scsi_task_mgmt_request tm_req;
2394 	int retval = 0;
2395 	u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
2396 	u8 retrycount = 5;
2397 	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2398 	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2399 	struct mpi3mr_tgt_dev *tgtdev = NULL;
2400 	unsigned long flags;
2401 
2402 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2403 	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2404 	if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE))
2405 		tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
2406 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2407 
2408 	if (drv_cmd)
2409 		goto issue_cmd;
2410 	do {
2411 		cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
2412 		    MPI3MR_NUM_DEVRMCMD);
2413 		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
2414 			if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
2415 				break;
2416 			cmd_idx = MPI3MR_NUM_DEVRMCMD;
2417 		}
2418 	} while (retrycount--);
2419 
2420 	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
2421 		delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
2422 		    GFP_ATOMIC);
2423 		if (!delayed_dev_rmhs)
2424 			return;
2425 		INIT_LIST_HEAD(&delayed_dev_rmhs->list);
2426 		delayed_dev_rmhs->handle = handle;
2427 		delayed_dev_rmhs->iou_rc = iou_rc;
2428 		list_add_tail(&delayed_dev_rmhs->list,
2429 		    &mrioc->delayed_rmhs_list);
2430 		ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
2431 		    __func__, handle);
2432 		return;
2433 	}
2434 	drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
2435 
2436 issue_cmd:
2437 	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2438 	ioc_info(mrioc,
2439 	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
2440 	    __func__, handle, cmd_idx);
2441 
2442 	memset(&tm_req, 0, sizeof(tm_req));
2443 	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2444 		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
2445 		goto out;
2446 	}
2447 	drv_cmd->state = MPI3MR_CMD_PENDING;
2448 	drv_cmd->is_waiting = 0;
2449 	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
2450 	drv_cmd->dev_handle = handle;
2451 	drv_cmd->iou_rc = iou_rc;
2452 	tm_req.dev_handle = cpu_to_le16(handle);
2453 	tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2454 	tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
2455 	tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
2456 	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
2457 
2458 	set_bit(handle, mrioc->removepend_bitmap);
2459 	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
2460 	if (retval) {
2461 		ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
2462 		    __func__);
2463 		goto out_failed;
2464 	}
2465 out:
2466 	return;
2467 out_failed:
2468 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2469 	drv_cmd->callback = NULL;
2470 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2471 	drv_cmd->retry_count = 0;
2472 	clear_bit(cmd_idx, mrioc->devrem_bitmap);
2473 }
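/*
 * The slot reservation above is a lock-free claim on a small command
 * pool: find_first_zero_bit() proposes a free index and
 * test_and_set_bit() atomically claims it, retrying when another
 * context wins the race (the driver additionally bounds the retries).
 * The idiom condenses to:
 *
 *	do {
 *		idx = find_first_zero_bit(bitmap, nbits);
 *		if (idx >= nbits)
 *			break;			(pool exhausted)
 *	} while (test_and_set_bit(idx, bitmap));
 */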
2474 
2475 /**
2476  * mpi3mr_complete_evt_ack - event ack request completion
2477  * @mrioc: Adapter instance reference
2478  * @drv_cmd: Internal command tracker
2479  *
2480  * This is the completion handler for the non-blocking event
2481  * acknowledgment sent to the firmware; it issues any pending
2482  * event acknowledgment request.
2483  *
2484  * Return: Nothing
2485  */
2486 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
2487 	struct mpi3mr_drv_cmd *drv_cmd)
2488 {
2489 	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2490 	struct delayed_evt_ack_node *delayed_evtack = NULL;
2491 
2492 	if (drv_cmd->state & MPI3MR_CMD_RESET)
2493 		goto clear_drv_cmd;
2494 
2495 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2496 		dprint_event_th(mrioc,
2497 		    "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
2498 		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2499 		    drv_cmd->ioc_loginfo);
2500 	}
2501 
2502 	if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2503 		delayed_evtack =
2504 			list_entry(mrioc->delayed_evtack_cmds_list.next,
2505 			    struct delayed_evt_ack_node, list);
2506 		mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
2507 		    delayed_evtack->event_ctx);
2508 		list_del(&delayed_evtack->list);
2509 		kfree(delayed_evtack);
2510 		return;
2511 	}
2512 clear_drv_cmd:
2513 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2514 	drv_cmd->callback = NULL;
2515 	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2516 }
2517 
2518 /**
2519  * mpi3mr_send_event_ack - Issue event acknowledgment request
2520  * @mrioc: Adapter instance reference
2521  * @event: MPI3 event id
2522  * @cmdparam: Internal command tracker
2523  * @event_ctx: event context
2524  *
2525  * Issues an event acknowledgment request to the firmware if there
2526  * is a free command to send the event ack, else adds it to a pend
2527  * list so that it will be processed on completion of a prior
2528  * event acknowledgment.
2529  *
2530  * Return: Nothing
2531  */
2532 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
2533 	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
2534 {
2535 	struct mpi3_event_ack_request evtack_req;
2536 	int retval = 0;
2537 	u8 retrycount = 5;
2538 	u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2539 	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2540 	struct delayed_evt_ack_node *delayed_evtack = NULL;
2541 
2542 	if (drv_cmd) {
2543 		dprint_event_th(mrioc,
2544 		    "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2545 		    event, event_ctx);
2546 		goto issue_cmd;
2547 	}
2548 	dprint_event_th(mrioc,
2549 	    "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2550 	    event, event_ctx);
2551 	do {
2552 		cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
2553 		    MPI3MR_NUM_EVTACKCMD);
2554 		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
2555 			if (!test_and_set_bit(cmd_idx,
2556 			    mrioc->evtack_cmds_bitmap))
2557 				break;
2558 			cmd_idx = MPI3MR_NUM_EVTACKCMD;
2559 		}
2560 	} while (retrycount--);
2561 
2562 	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
2563 		delayed_evtack = kzalloc(sizeof(*delayed_evtack),
2564 		    GFP_ATOMIC);
2565 		if (!delayed_evtack)
2566 			return;
2567 		INIT_LIST_HEAD(&delayed_evtack->list);
2568 		delayed_evtack->event = event;
2569 		delayed_evtack->event_ctx = event_ctx;
2570 		list_add_tail(&delayed_evtack->list,
2571 		    &mrioc->delayed_evtack_cmds_list);
2572 		dprint_event_th(mrioc,
2573 		    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
2574 		    event, event_ctx);
2575 		return;
2576 	}
2577 	drv_cmd = &mrioc->evtack_cmds[cmd_idx];
2578 
2579 issue_cmd:
2580 	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2581 
2582 	memset(&evtack_req, 0, sizeof(evtack_req));
2583 	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2584 		dprint_event_th(mrioc,
2585 		    "sending event ack failed due to command in use\n");
2586 		goto out;
2587 	}
2588 	drv_cmd->state = MPI3MR_CMD_PENDING;
2589 	drv_cmd->is_waiting = 0;
2590 	drv_cmd->callback = mpi3mr_complete_evt_ack;
2591 	evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
2592 	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
2593 	evtack_req.event = event;
2594 	evtack_req.event_context = cpu_to_le32(event_ctx);
2595 	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
2596 	    sizeof(evtack_req), 1);
2597 	if (retval) {
2598 		dprint_event_th(mrioc,
2599 		    "posting event ack request is failed\n");
2600 		goto out_failed;
2601 	}
2602 
2603 	dprint_event_th(mrioc,
2604 	    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
2605 	    event, event_ctx);
2606 out:
2607 	return;
2608 out_failed:
2609 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2610 	drv_cmd->callback = NULL;
2611 	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2612 }
2613 
2614 /**
2615  * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
2616  * @mrioc: Adapter instance reference
2617  * @event_reply: event data
2618  *
2619  * Checks the reason code and, based on that, either blocks I/O
2620  * to the device, unblocks I/O to the device, or starts the device
2621  * removal handshake with reason as remove with the firmware for
2622  * PCIe devices.
2623  *
2624  * Return: Nothing
2625  */
2626 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
2627 	struct mpi3_event_notification_reply *event_reply)
2628 {
2629 	struct mpi3_event_data_pcie_topology_change_list *topo_evt =
2630 	    (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
2631 	int i;
2632 	u16 handle;
2633 	u8 reason_code;
2634 	struct mpi3mr_tgt_dev *tgtdev = NULL;
2635 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2636 
2637 	for (i = 0; i < topo_evt->num_entries; i++) {
2638 		handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
2639 		if (!handle)
2640 			continue;
2641 		reason_code = topo_evt->port_entry[i].port_status;
2642 		scsi_tgt_priv_data =  NULL;
2643 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2644 		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2645 			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2646 			    tgtdev->starget->hostdata;
2647 		switch (reason_code) {
2648 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
2649 			if (scsi_tgt_priv_data) {
2650 				scsi_tgt_priv_data->dev_removed = 1;
2651 				scsi_tgt_priv_data->dev_removedelay = 0;
2652 				atomic_set(&scsi_tgt_priv_data->block_io, 0);
2653 			}
2654 			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2655 			    MPI3_CTRL_OP_REMOVE_DEVICE);
2656 			break;
2657 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
2658 			if (scsi_tgt_priv_data) {
2659 				scsi_tgt_priv_data->dev_removedelay = 1;
2660 				atomic_inc(&scsi_tgt_priv_data->block_io);
2661 			}
2662 			break;
2663 		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
2664 			if (scsi_tgt_priv_data &&
2665 			    scsi_tgt_priv_data->dev_removedelay) {
2666 				scsi_tgt_priv_data->dev_removedelay = 0;
2667 				atomic_dec_if_positive
2668 				    (&scsi_tgt_priv_data->block_io);
2669 			}
2670 			break;
2671 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
2672 		default:
2673 			break;
2674 		}
2675 		if (tgtdev)
2676 			mpi3mr_tgtdev_put(tgtdev);
2677 	}
2678 }
2679 
2680 /**
2681  * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
2682  * @mrioc: Adapter instance reference
2683  * @event_reply: event data
2684  *
2685  * Checks the reason code and, based on that, either blocks I/O
2686  * to the device, unblocks I/O to the device, or starts the device
2687  * removal handshake with reason as remove with the firmware for
2688  * SAS/SATA devices.
2689  *
2690  * Return: Nothing
2691  */
2692 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
2693 	struct mpi3_event_notification_reply *event_reply)
2694 {
2695 	struct mpi3_event_data_sas_topology_change_list *topo_evt =
2696 	    (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
2697 	int i;
2698 	u16 handle;
2699 	u8 reason_code;
2700 	struct mpi3mr_tgt_dev *tgtdev = NULL;
2701 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2702 
2703 	for (i = 0; i < topo_evt->num_entries; i++) {
2704 		handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
2705 		if (!handle)
2706 			continue;
2707 		reason_code = topo_evt->phy_entry[i].status &
2708 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
2709 		scsi_tgt_priv_data =  NULL;
2710 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2711 		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2712 			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2713 			    tgtdev->starget->hostdata;
2714 		switch (reason_code) {
2715 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
2716 			if (scsi_tgt_priv_data) {
2717 				scsi_tgt_priv_data->dev_removed = 1;
2718 				scsi_tgt_priv_data->dev_removedelay = 0;
2719 				atomic_set(&scsi_tgt_priv_data->block_io, 0);
2720 			}
2721 			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2722 			    MPI3_CTRL_OP_REMOVE_DEVICE);
2723 			break;
2724 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
2725 			if (scsi_tgt_priv_data) {
2726 				scsi_tgt_priv_data->dev_removedelay = 1;
2727 				atomic_inc(&scsi_tgt_priv_data->block_io);
2728 			}
2729 			break;
2730 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
2731 			if (scsi_tgt_priv_data &&
2732 			    scsi_tgt_priv_data->dev_removedelay) {
2733 				scsi_tgt_priv_data->dev_removedelay = 0;
2734 				atomic_dec_if_positive
2735 				    (&scsi_tgt_priv_data->block_io);
2736 			}
2737 			break;
2738 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
2739 		default:
2740 			break;
2741 		}
2742 		if (tgtdev)
2743 			mpi3mr_tgtdev_put(tgtdev);
2744 	}
2745 }
2746 
2747 /**
2748  * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
2749  * @mrioc: Adapter instance reference
2750  * @event_reply: event data
2751  *
2752  * Checks the reason code and, based on that, either blocks I/O
2753  * to the device, unblocks I/O to the device, or starts the device
2754  * removal handshake with reason as remove/hide acknowledgment
2755  * with the firmware.
2756  *
2757  * Return: Nothing
2758  */
2759 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
2760 	struct mpi3_event_notification_reply *event_reply)
2761 {
2762 	u16 dev_handle = 0;
2763 	u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
2764 	struct mpi3mr_tgt_dev *tgtdev = NULL;
2765 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2766 	struct mpi3_event_data_device_status_change *evtdata =
2767 	    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
2768 
2769 	if (mrioc->stop_drv_processing)
2770 		goto out;
2771 
2772 	dev_handle = le16_to_cpu(evtdata->dev_handle);
2773 
2774 	switch (evtdata->reason_code) {
2775 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
2776 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
2777 		block = 1;
2778 		break;
2779 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
2780 		delete = 1;
2781 		hide = 1;
2782 		break;
2783 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
2784 		delete = 1;
2785 		remove = 1;
2786 		break;
2787 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
2788 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
2789 		ublock = 1;
2790 		break;
2791 	default:
2792 		break;
2793 	}
2794 
2795 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2796 	if (!tgtdev)
2797 		goto out;
2798 	if (hide)
2799 		tgtdev->is_hidden = hide;
2800 	if (tgtdev->starget && tgtdev->starget->hostdata) {
2801 		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2802 		    tgtdev->starget->hostdata;
2803 		if (block)
2804 			atomic_inc(&scsi_tgt_priv_data->block_io);
2805 		if (delete)
2806 			scsi_tgt_priv_data->dev_removed = 1;
2807 		if (ublock)
2808 			atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
2809 	}
2810 	if (remove)
2811 		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2812 		    MPI3_CTRL_OP_REMOVE_DEVICE);
2813 	if (hide)
2814 		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2815 		    MPI3_CTRL_OP_HIDDEN_ACK);
2816 
2817 out:
2818 	if (tgtdev)
2819 		mpi3mr_tgtdev_put(tgtdev);
2820 }
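/*
 * block_io above behaves like a counting gate rather than a flag: each
 * "reset started" reason code increments it and each matching
 * completion decrements it via atomic_dec_if_positive(), which also
 * guards against underflow, so I/O stays blocked until every
 * outstanding blocker has been released.
 */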
2821 
2822 /**
2823  * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
2824  * @mrioc: Adapter instance reference
2825  * @event_reply: event data
2826  *
2827  * Blocks and unblocks host-level I/O based on the reason code.
2828  *
2829  * Return: Nothing
2830  */
2831 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
2832 	struct mpi3_event_notification_reply *event_reply)
2833 {
2834 	struct mpi3_event_data_prepare_for_reset *evtdata =
2835 	    (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
2836 
2837 	if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
2838 		dprint_event_th(mrioc,
2839 		    "prepare for reset event top half with rc=start\n");
2840 		if (mrioc->prepare_for_reset)
2841 			return;
2842 		mrioc->prepare_for_reset = 1;
2843 		mrioc->prepare_for_reset_timeout_counter = 0;
2844 	} else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
2845 		dprint_event_th(mrioc,
2846 		    "prepare for reset top half with rc=abort\n");
2847 		mrioc->prepare_for_reset = 0;
2848 		mrioc->prepare_for_reset_timeout_counter = 0;
2849 	}
2850 	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2851 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2852 		mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
2853 		    le32_to_cpu(event_reply->event_context));
2854 }
2855 
2856 /**
2857  * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
2858  * @mrioc: Adapter instance reference
2859  * @event_reply: event data
2860  *
2861  * Identifies the new shutdown timeout value and updates it.
2862  *
2863  * Return: Nothing
2864  */
2865 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
2866 	struct mpi3_event_notification_reply *event_reply)
2867 {
2868 	struct mpi3_event_data_energy_pack_change *evtdata =
2869 	    (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
2870 	u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
2871 
2872 	if (shutdown_timeout <= 0) {
2873 		ioc_warn(mrioc,
2874 		    "%s :Invalid Shutdown Timeout received = %d\n",
2875 		    __func__, shutdown_timeout);
2876 		return;
2877 	}
2878 
2879 	ioc_info(mrioc,
2880 	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
2881 	    __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
2882 	mrioc->facts.shutdown_timeout = shutdown_timeout;
2883 }
2884 
2885 /**
2886  * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
2887  * @mrioc: Adapter instance reference
2888  * @event_reply: event data
2889  *
2890  * Displays cable management event details.
2891  *
2892  * Return: Nothing
2893  */
2894 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
2895 	struct mpi3_event_notification_reply *event_reply)
2896 {
2897 	struct mpi3_event_data_cable_management *evtdata =
2898 	    (struct mpi3_event_data_cable_management *)event_reply->event_data;
2899 
2900 	switch (evtdata->status) {
2901 	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
2902 	{
2903 		ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
2904 		    "Devices connected to this cable are not detected.\n"
2905 		    "This cable requires %d mW of power.\n",
2906 		    evtdata->receptacle_id,
2907 		    le32_to_cpu(evtdata->active_cable_power_requirement));
2908 		break;
2909 	}
2910 	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
2911 	{
2912 		ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
2913 		    evtdata->receptacle_id);
2914 		break;
2915 	}
2916 	default:
2917 		break;
2918 	}
2919 }
2920 
2921 /**
2922  * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
2923  * @mrioc: Adapter instance reference
2924  *
2925  * Add a driver-specific event to make sure that the driver won't process the
2926  * events until all the devices are refreshed during soft reset.
2927  *
2928  * Return: Nothing
2929  */
2930 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
2931 {
2932 	struct mpi3mr_fwevt *fwevt = NULL;
2933 
2934 	fwevt = mpi3mr_alloc_fwevt(0);
2935 	if (!fwevt) {
2936 		dprint_event_th(mrioc,
2937 		    "failed to schedule bottom half handler for event(0x%02x)\n",
2938 		    MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
2939 		return;
2940 	}
2941 	fwevt->mrioc = mrioc;
2942 	fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
2943 	fwevt->send_ack = 0;
2944 	fwevt->process_evt = 1;
2945 	fwevt->evt_ctx = 0;
2946 	fwevt->event_data_size = 0;
2947 	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
2948 }
2949 
2950 /**
2951  * mpi3mr_os_handle_events - Firmware event handler
2952  * @mrioc: Adapter instance reference
2953  * @event_reply: event data
2954  *
2955  * Identifies whether the event has to be handled and acknowledged,
2956  * and either processes the event in the tophalf and/or schedules a
2957  * bottom half through mpi3mr_fwevt_worker.
2958  *
2959  * Return: Nothing
2960  */
2961 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
2962 	struct mpi3_event_notification_reply *event_reply)
2963 {
2964 	u16 evt_type, sz;
2965 	struct mpi3mr_fwevt *fwevt = NULL;
2966 	bool ack_req = 0, process_evt_bh = 0;
2967 
2968 	if (mrioc->stop_drv_processing)
2969 		return;
2970 
2971 	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2972 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2973 		ack_req = 1;
2974 
2975 	evt_type = event_reply->event;
2976 	mpi3mr_event_trigger(mrioc, event_reply->event);
2977 
2978 	switch (evt_type) {
2979 	case MPI3_EVENT_DEVICE_ADDED:
2980 	{
2981 		struct mpi3_device_page0 *dev_pg0 =
2982 		    (struct mpi3_device_page0 *)event_reply->event_data;
2983 		if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
2984 			ioc_err(mrioc,
2985 			    "%s :Failed to add device in the device add event\n",
2986 			    __func__);
2987 		else
2988 			process_evt_bh = 1;
2989 		break;
2990 	}
2991 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
2992 	{
2993 		process_evt_bh = 1;
2994 		mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
2995 		break;
2996 	}
2997 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
2998 	{
2999 		process_evt_bh = 1;
3000 		mpi3mr_sastopochg_evt_th(mrioc, event_reply);
3001 		break;
3002 	}
3003 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
3004 	{
3005 		process_evt_bh = 1;
3006 		mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
3007 		break;
3008 	}
3009 	case MPI3_EVENT_PREPARE_FOR_RESET:
3010 	{
3011 		mpi3mr_preparereset_evt_th(mrioc, event_reply);
3012 		ack_req = 0;
3013 		break;
3014 	}
3015 	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
3016 	{
3017 		mpi3mr_hdbstatuschg_evt_th(mrioc, event_reply);
3018 		break;
3019 	}
3020 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
3021 	case MPI3_EVENT_LOG_DATA:
3022 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
3023 	case MPI3_EVENT_ENCL_DEVICE_ADDED:
3024 	{
3025 		process_evt_bh = 1;
3026 		break;
3027 	}
3028 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
3029 	{
3030 		mpi3mr_energypackchg_evt_th(mrioc, event_reply);
3031 		break;
3032 	}
3033 	case MPI3_EVENT_CABLE_MGMT:
3034 	{
3035 		mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
3036 		break;
3037 	}
3038 	case MPI3_EVENT_SAS_DISCOVERY:
3039 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
3040 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
3041 	case MPI3_EVENT_PCIE_ENUMERATION:
3042 		break;
3043 	default:
3044 		ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
3045 		    __func__, evt_type);
3046 		break;
3047 	}
3048 	if (process_evt_bh || ack_req) {
3049 		sz = event_reply->event_data_length * 4;
3050 		fwevt = mpi3mr_alloc_fwevt(sz);
3051 		if (!fwevt) {
3052 			ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
3053 			    __func__, __FILE__, __LINE__, __func__);
3054 			return;
3055 		}
3056 
3057 		memcpy(fwevt->event_data, event_reply->event_data, sz);
3058 		fwevt->mrioc = mrioc;
3059 		fwevt->event_id = evt_type;
3060 		fwevt->send_ack = ack_req;
3061 		fwevt->process_evt = process_evt_bh;
3062 		fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
3063 		mpi3mr_fwevt_add_to_list(mrioc, fwevt);
3064 	}
3065 }
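/*
 * Sizing note for the copy above: event_data_length in the event
 * reply is expressed in dwords, hence the multiply by 4 to obtain the
 * byte count carried into the fwevt.
 */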
3066 
3067 /**
3068  * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
3069  * @mrioc: Adapter instance reference
3070  * @scmd: SCSI command reference
3071  * @scsiio_req: MPI3 SCSI IO request
3072  *
3073  * Identifies the protection information flags from the SCSI
3074  * command and set appropriate flags in the MPI3 SCSI IO
3075  * request.
3076  *
3077  * Return: Nothing
3078  */
3079 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
3080 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3081 {
3082 	u16 eedp_flags = 0;
3083 	unsigned char prot_op = scsi_get_prot_op(scmd);
3084 
3085 	switch (prot_op) {
3086 	case SCSI_PROT_NORMAL:
3087 		return;
3088 	case SCSI_PROT_READ_STRIP:
3089 		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
3090 		break;
3091 	case SCSI_PROT_WRITE_INSERT:
3092 		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
3093 		break;
3094 	case SCSI_PROT_READ_INSERT:
3095 		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
3096 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3097 		break;
3098 	case SCSI_PROT_WRITE_STRIP:
3099 		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
3100 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3101 		break;
3102 	case SCSI_PROT_READ_PASS:
3103 		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
3104 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3105 		break;
3106 	case SCSI_PROT_WRITE_PASS:
3107 		if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
3108 			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
3109 			scsiio_req->sgl[0].eedp.application_tag_translation_mask =
3110 			    0xffff;
3111 		} else
3112 			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
3113 
3114 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
3115 		break;
3116 	default:
3117 		return;
3118 	}
3119 
3120 	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
3121 		eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
3122 
3123 	if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
3124 		eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
3125 
3126 	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
3127 		eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
3128 			MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
3129 		scsiio_req->cdb.eedp32.primary_reference_tag =
3130 			cpu_to_be32(scsi_prot_ref_tag(scmd));
3131 	}
3132 
3133 	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
3134 		eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
3135 
3136 	eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
3137 
3138 	switch (scsi_prot_interval(scmd)) {
3139 	case 512:
3140 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
3141 		break;
3142 	case 520:
3143 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
3144 		break;
3145 	case 4080:
3146 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
3147 		break;
3148 	case 4088:
3149 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
3150 		break;
3151 	case 4096:
3152 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
3153 		break;
3154 	case 4104:
3155 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
3156 		break;
3157 	case 4160:
3158 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
3159 		break;
3160 	default:
3161 		break;
3162 	}
3163 
3164 	scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
3165 	scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
3166 }
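/*
 * Worked example (illustrative values): for SCSI_PROT_WRITE_PASS with
 * guard and reference checking enabled on 512-byte sectors, the logic
 * above effectively composes:
 *
 *	eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK |
 *	    MPI3_EEDPFLAGS_CHK_GUARD |
 *	    MPI3_EEDPFLAGS_CHK_REF_TAG |
 *	    MPI3_EEDPFLAGS_INCR_PRI_REF_TAG |
 *	    MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
 *	scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
 */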
3167 
3168 /**
3169  * mpi3mr_build_sense_buffer - Map sense information
3170  * @desc: Sense type
3171  * @buf: Sense buffer to populate
3172  * @key: Sense key
3173  * @asc: Additional sense code
3174  * @ascq: Additional sense code qualifier
3175  *
3176  * Maps the given sense information into either descriptor or
3177  * fixed format sense data.
3178  *
3179  * Return: Nothing
3180  */
3181 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
3182 	u8 asc, u8 ascq)
3183 {
3184 	if (desc) {
3185 		buf[0] = 0x72;	/* descriptor, current */
3186 		buf[1] = key;
3187 		buf[2] = asc;
3188 		buf[3] = ascq;
3189 		buf[7] = 0;
3190 	} else {
3191 		buf[0] = 0x70;	/* fixed, current */
3192 		buf[2] = key;
3193 		buf[7] = 0xa;
3194 		buf[12] = asc;
3195 		buf[13] = ascq;
3196 	}
3197 }
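/*
 * Layout sketch: for fixed-format sense (desc == 0) with key 0x05
 * (ILLEGAL_REQUEST), asc 0x10 and ascq 0x01, the function above
 * produces:
 *
 *	buf[0]  = 0x70	(fixed format, current error)
 *	buf[2]  = 0x05	(sense key)
 *	buf[7]  = 0x0a	(additional sense length)
 *	buf[12] = 0x10	(ASC)
 *	buf[13] = 0x01	(ASCQ)
 */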
3198 
3199 /**
3200  * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
3201  * @scmd: SCSI command reference
3202  * @ioc_status: status of MPI3 request
3203  *
3204  * Maps the EEDP error status of the SCSI IO request to sense
3205  * data.
3206  *
3207  * Return: Nothing
3208  */
3209 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
3210 	u16 ioc_status)
3211 {
3212 	u8 ascq = 0;
3213 
3214 	switch (ioc_status) {
3215 	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
3216 		ascq = 0x01;
3217 		break;
3218 	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
3219 		ascq = 0x02;
3220 		break;
3221 	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
3222 		ascq = 0x03;
3223 		break;
3224 	default:
3225 		ascq = 0x00;
3226 		break;
3227 	}
3228 
3229 	mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
3230 	    0x10, ascq);
3231 	scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
3232 }
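/*
 * The ASC/ASCQ pairs chosen above follow the T10 protection
 * information convention: 0x10/0x01 logical block guard check failed,
 * 0x10/0x02 application tag check failed, 0x10/0x03 reference tag
 * check failed.
 */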
3233 
3234 /**
3235  * mpi3mr_process_op_reply_desc - reply descriptor handler
3236  * @mrioc: Adapter instance reference
3237  * @reply_desc: Operational reply descriptor
3238  * @reply_dma: place holder for reply DMA address
3239  * @qidx: Operational queue index
3240  *
3241  * Processes the operational reply descriptor and identifies the
3242  * descriptor type. Based on the descriptor, maps the MPI3 request
3243  * status to a SCSI command status and calls the scsi_done()
3244  * callback.
3245  *
3246  * Return: Nothing
3247  */
3248 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
3249 	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
3250 {
3251 	u16 reply_desc_type, host_tag = 0;
3252 	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
3253 	u32 ioc_loginfo = 0;
3254 	struct mpi3_status_reply_descriptor *status_desc = NULL;
3255 	struct mpi3_address_reply_descriptor *addr_desc = NULL;
3256 	struct mpi3_success_reply_descriptor *success_desc = NULL;
3257 	struct mpi3_scsi_io_reply *scsi_reply = NULL;
3258 	struct scsi_cmnd *scmd = NULL;
3259 	struct scmd_priv *priv = NULL;
3260 	u8 *sense_buf = NULL;
3261 	u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
3262 	u32 xfer_count = 0, sense_count = 0, resp_data = 0;
3263 	u16 dev_handle = 0xFFFF;
3264 	struct scsi_sense_hdr sshdr;
3265 	struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
3266 	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3267 	u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
3268 	struct mpi3mr_throttle_group_info *tg = NULL;
3269 	u8 throttle_enabled_dev = 0;
3270 
3271 	*reply_dma = 0;
3272 	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
3273 	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
3274 	switch (reply_desc_type) {
3275 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
3276 		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
3277 		host_tag = le16_to_cpu(status_desc->host_tag);
3278 		ioc_status = le16_to_cpu(status_desc->ioc_status);
3279 		if (ioc_status &
3280 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
3281 			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
3282 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3283 		mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
3284 		break;
3285 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
3286 		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
3287 		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
3288 		scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
3289 		    *reply_dma);
3290 		if (!scsi_reply) {
3291 			panic("%s: scsi_reply is NULL, this shouldn't happen\n",
3292 			    mrioc->name);
3293 			goto out;
3294 		}
3295 		host_tag = le16_to_cpu(scsi_reply->host_tag);
3296 		ioc_status = le16_to_cpu(scsi_reply->ioc_status);
3297 		scsi_status = scsi_reply->scsi_status;
3298 		scsi_state = scsi_reply->scsi_state;
3299 		dev_handle = le16_to_cpu(scsi_reply->dev_handle);
3300 		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
3301 		xfer_count = le32_to_cpu(scsi_reply->transfer_count);
3302 		sense_count = le32_to_cpu(scsi_reply->sense_count);
3303 		resp_data = le32_to_cpu(scsi_reply->response_data);
3304 		sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
3305 		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
3306 		if (ioc_status &
3307 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
3308 			ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
3309 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3310 		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
3311 			panic("%s: Ran out of sense buffers\n", mrioc->name);
3312 		if (sense_buf) {
3313 			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
3314 			mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
3315 			    sshdr.asc, sshdr.ascq);
3316 		}
3317 		mpi3mr_reply_trigger(mrioc, ioc_status, ioc_loginfo);
3318 		break;
3319 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
3320 		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
3321 		host_tag = le16_to_cpu(success_desc->host_tag);
3322 		break;
3323 	default:
3324 		break;
3325 	}
3326 	scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
3327 	if (!scmd) {
3328 		panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
3329 		    mrioc->name, host_tag);
3330 		goto out;
3331 	}
3332 	priv = scsi_cmd_priv(scmd);
3333 
3334 	data_len_blks = scsi_bufflen(scmd) >> 9;
3335 	sdev_priv_data = scmd->device->hostdata;
3336 	if (sdev_priv_data) {
3337 		stgt_priv_data = sdev_priv_data->tgt_priv_data;
3338 		if (stgt_priv_data) {
3339 			tg = stgt_priv_data->throttle_group;
3340 			throttle_enabled_dev =
3341 			    stgt_priv_data->io_throttle_enabled;
3342 			dev_handle = stgt_priv_data->dev_handle;
3343 		}
3344 	}
3345 	if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
3346 	    throttle_enabled_dev)) {
3347 		ioc_pend_data_len = atomic_sub_return(data_len_blks,
3348 		    &mrioc->pend_large_data_sz);
3349 		if (tg) {
3350 			tg_pend_data_len = atomic_sub_return(data_len_blks,
3351 			    &tg->pend_large_data_sz);
3352 			if (tg->io_divert  && ((ioc_pend_data_len <=
3353 			    mrioc->io_throttle_low) &&
3354 			    (tg_pend_data_len <= tg->low))) {
3355 				tg->io_divert = 0;
3356 				mpi3mr_set_io_divert_for_all_vd_in_tg(
3357 				    mrioc, tg, 0);
3358 			}
3359 		} else {
3360 			if (ioc_pend_data_len <= mrioc->io_throttle_low)
3361 				stgt_priv_data->io_divert = 0;
3362 		}
3363 	} else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
3364 		ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
3365 		if (!tg) {
3366 			if (ioc_pend_data_len <= mrioc->io_throttle_low)
3367 				stgt_priv_data->io_divert = 0;
3368 
3369 		} else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
3370 			tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
3371 			if (tg->io_divert  && (tg_pend_data_len <= tg->low)) {
3372 				tg->io_divert = 0;
3373 				mpi3mr_set_io_divert_for_all_vd_in_tg(
3374 				    mrioc, tg, 0);
3375 			}
3376 		}
3377 	}
3378 
3379 	if (success_desc) {
3380 		scmd->result = DID_OK << 16;
3381 		goto out_success;
3382 	}
3383 
3384 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
3385 	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
3386 	    xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
3387 	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
3388 	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
3389 		ioc_status = MPI3_IOCSTATUS_SUCCESS;
3390 
3391 	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
3392 	    sense_buf) {
3393 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);
3394 
3395 		memcpy(scmd->sense_buffer, sense_buf, sz);
3396 	}
3397 
3398 	switch (ioc_status) {
3399 	case MPI3_IOCSTATUS_BUSY:
3400 	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
3401 		scmd->result = SAM_STAT_BUSY;
3402 		break;
3403 	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3404 		scmd->result = DID_NO_CONNECT << 16;
3405 		break;
3406 	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
3407 		if (ioc_loginfo == IOC_LOGINFO_SATA_NCQ_FAIL_AFTER_ERR) {
3408 			/*
3409 			 * This is an ATA NCQ command aborted due to another NCQ
3410 			 * command failure. We must retry this command
3411 			 * immediately but without incrementing its retry
3412 			 * counter.
3413 			 */
3414 			WARN_ON_ONCE(xfer_count != 0);
3415 			scmd->result = DID_IMM_RETRY << 16;
3416 		} else {
3417 			scmd->result = DID_SOFT_ERROR << 16;
3418 		}
3419 		break;
3420 	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
3421 	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
3422 		scmd->result = DID_RESET << 16;
3423 		break;
3424 	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3425 		if ((xfer_count == 0) || (scmd->underflow > xfer_count))
3426 			scmd->result = DID_SOFT_ERROR << 16;
3427 		else
3428 			scmd->result = (DID_OK << 16) | scsi_status;
3429 		break;
3430 	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
3431 		scmd->result = (DID_OK << 16) | scsi_status;
3432 		if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
3433 			break;
3434 		if (xfer_count < scmd->underflow) {
3435 			if (scsi_status == SAM_STAT_BUSY)
3436 				scmd->result = SAM_STAT_BUSY;
3437 			else
3438 				scmd->result = DID_SOFT_ERROR << 16;
3439 		} else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
3440 		    (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
3441 			scmd->result = DID_SOFT_ERROR << 16;
3442 		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
3443 			scmd->result = DID_RESET << 16;
3444 		break;
3445 	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
3446 		scsi_set_resid(scmd, 0);
3447 		fallthrough;
3448 	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
3449 	case MPI3_IOCSTATUS_SUCCESS:
3450 		scmd->result = (DID_OK << 16) | scsi_status;
3451 		if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
3452 		    (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
3453 			(sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
3454 			scmd->result = DID_SOFT_ERROR << 16;
3455 		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
3456 			scmd->result = DID_RESET << 16;
3457 		break;
3458 	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
3459 	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
3460 	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
3461 		mpi3mr_map_eedp_error(scmd, ioc_status);
3462 		break;
3463 	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3464 	case MPI3_IOCSTATUS_INVALID_FUNCTION:
3465 	case MPI3_IOCSTATUS_INVALID_SGL:
3466 	case MPI3_IOCSTATUS_INTERNAL_ERROR:
3467 	case MPI3_IOCSTATUS_INVALID_FIELD:
3468 	case MPI3_IOCSTATUS_INVALID_STATE:
3469 	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
3470 	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3471 	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
3472 	default:
3473 		scmd->result = DID_SOFT_ERROR << 16;
3474 		break;
3475 	}
3476 
3477 	if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
3478 	    (scmd->cmnd[0] != ATA_16) &&
3479 	    mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) {
3480 		ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
3481 		    scmd->result);
3482 		scsi_print_command(scmd);
3483 		ioc_info(mrioc,
3484 		    "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
3485 		    __func__, dev_handle, ioc_status, ioc_loginfo,
3486 		    priv->req_q_idx + 1);
3487 		ioc_info(mrioc,
3488 		    " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
3489 		    host_tag, scsi_state, scsi_status, xfer_count, resp_data);
3490 		if (sense_buf) {
3491 			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
3492 			ioc_info(mrioc,
3493 			    "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
3494 			    __func__, sense_count, sshdr.sense_key,
3495 			    sshdr.asc, sshdr.ascq);
3496 		}
3497 	}
3498 out_success:
3499 	if (priv->meta_sg_valid) {
3500 		dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
3501 		    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
3502 	}
3503 	mpi3mr_clear_scmd_priv(mrioc, scmd);
3504 	scsi_dma_unmap(scmd);
3505 	scsi_done(scmd);
3506 out:
3507 	if (sense_buf)
3508 		mpi3mr_repost_sense_buf(mrioc,
3509 		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
3510 }
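
/*
 * Editorial note on the three descriptor forms handled above: a STATUS
 * descriptor carries host_tag, ioc_status and optional log info inline;
 * an ADDRESS_REPLY descriptor points at a full struct
 * mpi3_scsi_io_reply frame in host memory (scsi_status, scsi_state,
 * sense buffer address, transfer count); a SUCCESS descriptor carries
 * only the host tag and is the fast path with an implied ioc_status of
 * MPI3_IOCSTATUS_SUCCESS.
 */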
3511 
3512 /**
3513  * mpi3mr_get_chain_idx - get free chain buffer index
3514  * @mrioc: Adapter instance reference
3515  *
3516  * Try to get a free chain buffer index from the free pool.
3517  *
3518  * Return: -1 on failure or the free chain buffer index
3519  */
3520 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
3521 {
3522 	u8 retry_count = 5;
3523 	int cmd_idx = -1;
3524 	unsigned long flags;
3525 
3526 	spin_lock_irqsave(&mrioc->chain_buf_lock, flags);
3527 	do {
3528 		cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
3529 		    mrioc->chain_buf_count);
3530 		if (cmd_idx < mrioc->chain_buf_count) {
3531 			set_bit(cmd_idx, mrioc->chain_bitmap);
3532 			break;
3533 		}
3534 		cmd_idx = -1;
3535 	} while (retry_count--);
3536 	spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags);
3537 	return cmd_idx;
3538 }
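
/*
 * Editorial note: the index is returned on I/O completion (see
 * mpi3mr_clear_scmd_priv()) with clear_bit() on the same chain_bitmap.
 * clear_bit() is atomic, so the spinlock above only has to make the
 * find_first_zero_bit()/set_bit() pair atomic against concurrent
 * allocators.
 */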
3539 
3540 /**
3541  * mpi3mr_prepare_sg_scmd - build scatter gather list
3542  * @mrioc: Adapter instance reference
3543  * @scmd: SCSI command reference
3544  * @scsiio_req: MPI3 SCSI IO request
3545  *
3546  * This function maps the SCSI command's data and protection SGEs to
3547  * MPI request SGEs. If required, an additional 4K chain buffer is
3548  * used to send the SGEs.
3549  *
3550  * Return: 0 on success, -ENOMEM on dma_map_sg failure
3551  */
3552 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
3553 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3554 {
3555 	dma_addr_t chain_dma;
3556 	struct scatterlist *sg_scmd;
3557 	void *sg_local, *chain;
3558 	u32 chain_length;
3559 	int sges_left, chain_idx;
3560 	u32 sges_in_segment;
3561 	u8 simple_sgl_flags;
3562 	u8 simple_sgl_flags_last;
3563 	u8 last_chain_sgl_flags;
3564 	struct chain_element *chain_req;
3565 	struct scmd_priv *priv = NULL;
3566 	u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
3567 	    MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;
3568 
3569 	priv = scsi_cmd_priv(scmd);
3570 
3571 	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
3572 	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
3573 	simple_sgl_flags_last = simple_sgl_flags |
3574 	    MPI3_SGE_FLAGS_END_OF_LIST;
3575 	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
3576 	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
3577 
3578 	if (meta_sg)
3579 		sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
3580 	else
3581 		sg_local = &scsiio_req->sgl;
3582 
3583 	if (!scsiio_req->data_length && !meta_sg) {
3584 		mpi3mr_build_zero_len_sge(sg_local);
3585 		return 0;
3586 	}
3587 
3588 	if (meta_sg) {
3589 		sg_scmd = scsi_prot_sglist(scmd);
3590 		sges_left = dma_map_sg(&mrioc->pdev->dev,
3591 		    scsi_prot_sglist(scmd),
3592 		    scsi_prot_sg_count(scmd),
3593 		    scmd->sc_data_direction);
3594 		priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
3595 	} else {
3596 		/*
3597 		 * Some firmware versions byte-swap the REPORT ZONES command
3598 		 * reply from ATA-ZAC devices by directly accessing it in the host
3599 		 * buffer. This does not respect the default command DMA
3600 		 * direction and causes IOMMU page faults on some architectures
3601 		 * with an IOMMU enforcing write mappings (e.g. AMD hosts).
3602 		 * Avoid this issue by making the REPORT ZONES buffer mapping
3603 		 * bi-directional.
3604 		 */
3605 		if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES)
3606 			scmd->sc_data_direction = DMA_BIDIRECTIONAL;
3607 		sg_scmd = scsi_sglist(scmd);
3608 		sges_left = scsi_dma_map(scmd);
3609 	}
3610 
3611 	if (sges_left < 0) {
3612 		sdev_printk(KERN_ERR, scmd->device,
3613 		    "scsi_dma_map failed: request for %d bytes!\n",
3614 		    scsi_bufflen(scmd));
3615 		return -ENOMEM;
3616 	}
3617 	if (sges_left > mrioc->max_sgl_entries) {
3618 		sdev_printk(KERN_ERR, scmd->device,
3619 		    "scsi_dma_map returned unsupported sge count %d!\n",
3620 		    sges_left);
3621 		return -ENOMEM;
3622 	}
3623 
3624 	sges_in_segment = (mrioc->facts.op_req_sz -
3625 	    offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);
3626 
3627 	if (scsiio_req->sgl[0].eedp.flags ==
3628 	    MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
3629 		sg_local += sizeof(struct mpi3_sge_common);
3630 		sges_in_segment--;
3631 		/* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
3632 	}
3633 
3634 	if (scsiio_req->msg_flags ==
3635 	    MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
3636 		sges_in_segment--;
3637 		/* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
3638 	}
3639 
3640 	if (meta_sg)
3641 		sges_in_segment = 1;
3642 
3643 	if (sges_left <= sges_in_segment)
3644 		goto fill_in_last_segment;
3645 
3646 	/* fill in main message segment when there is a chain following */
3647 	while (sges_in_segment > 1) {
3648 		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
3649 		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
3650 		sg_scmd = sg_next(sg_scmd);
3651 		sg_local += sizeof(struct mpi3_sge_common);
3652 		sges_left--;
3653 		sges_in_segment--;
3654 	}
3655 
3656 	chain_idx = mpi3mr_get_chain_idx(mrioc);
3657 	if (chain_idx < 0)
3658 		return -1;
3659 	chain_req = &mrioc->chain_sgl_list[chain_idx];
3660 	if (meta_sg)
3661 		priv->meta_chain_idx = chain_idx;
3662 	else
3663 		priv->chain_idx = chain_idx;
3664 
3665 	chain = chain_req->addr;
3666 	chain_dma = chain_req->dma_addr;
3667 	sges_in_segment = sges_left;
3668 	chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);
3669 
3670 	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
3671 	    chain_length, chain_dma);
3672 
3673 	sg_local = chain;
3674 
3675 fill_in_last_segment:
3676 	while (sges_left > 0) {
3677 		if (sges_left == 1)
3678 			mpi3mr_add_sg_single(sg_local,
3679 			    simple_sgl_flags_last, sg_dma_len(sg_scmd),
3680 			    sg_dma_address(sg_scmd));
3681 		else
3682 			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
3683 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
3684 		sg_scmd = sg_next(sg_scmd);
3685 		sg_local += sizeof(struct mpi3_sge_common);
3686 		sges_left--;
3687 	}
3688 
3689 	return 0;
3690 }
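
/*
 * Editorial sketch of the resulting SGL layout when the SGEs do not fit
 * in the request frame:
 *
 *	request frame: [SIMPLE] ... [SIMPLE] [LAST_CHAIN -> chain buffer]
 *	chain buffer:  [SIMPLE] ... [SIMPLE | END_OF_LIST]
 *
 * The LAST_CHAIN element carries the chain buffer's DMA address and the
 * byte length of the remaining simple SGEs, and the final simple SGE is
 * flagged END_OF_LIST so the IOC knows where the list terminates.
 */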
3691 
3692 /**
3693  * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
3694  * @mrioc: Adapter instance reference
3695  * @scmd: SCSI command reference
3696  * @scsiio_req: MPI3 SCSI IO request
3697  *
3698  * This function calls mpi3mr_prepare_sg_scmd for constructing
3699  * both data SGEs and protection information SGEs in the MPI
3700  * format from the SCSI command as appropriate.
3701  *
3702  * Return: return value of mpi3mr_prepare_sg_scmd.
3703  */
3704 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
3705 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3706 {
3707 	int ret;
3708 
3709 	ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3710 	if (ret)
3711 		return ret;
3712 
3713 	if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
3714 		/* There is a valid meta sg */
3715 		scsiio_req->flags |=
3716 		    cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
3717 		ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3718 	}
3719 
3720 	return ret;
3721 }
3722 
3723 /**
3724  * mpi3mr_tm_response_name -  get TM response as a string
3725  * @resp_code: TM response code
3726  *
3727  * Convert a known task management response code to a readable
3728  * string.
3729  *
3730  * Return: response code string.
3731  */
3732 static const char *mpi3mr_tm_response_name(u8 resp_code)
3733 {
3734 	const char *desc;
3735 
3736 	switch (resp_code) {
3737 	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
3738 		desc = "task management request completed";
3739 		break;
3740 	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
3741 		desc = "invalid frame";
3742 		break;
3743 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
3744 		desc = "task management request not supported";
3745 		break;
3746 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
3747 		desc = "task management request failed";
3748 		break;
3749 	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
3750 		desc = "task management request succeeded";
3751 		break;
3752 	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
3753 		desc = "invalid LUN";
3754 		break;
3755 	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
3756 		desc = "overlapped tag attempted";
3757 		break;
3758 	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
3759 		desc = "task queued, however not sent to target";
3760 		break;
3761 	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
3762 		desc = "task management request denied by NVMe device";
3763 		break;
3764 	default:
3765 		desc = "unknown";
3766 		break;
3767 	}
3768 
3769 	return desc;
3770 }
3771 
3772 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
3773 {
3774 	int i;
3775 	int num_of_reply_queues =
3776 	    mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
3777 
3778 	for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
3779 		mpi3mr_process_op_reply_q(mrioc,
3780 		    mrioc->intr_info[i].op_reply_q);
3781 }
3782 
3783 /**
3784  * mpi3mr_issue_tm - Issue Task Management request
3785  * @mrioc: Adapter instance reference
3786  * @tm_type: Task Management type
3787  * @handle: Device handle
3788  * @lun: lun ID
3789  * @htag: Host tag of the TM request
3790  * @timeout: TM timeout value
3791  * @drv_cmd: Internal command tracker
3792  * @resp_code: Response code place holder
3793  * @scmd: SCSI command
3794  *
3795  * Issues a Task Management Request to the controller for the
3796  * specified target, LUN and command, waits for its completion,
3797  * and checks the TM response. Recovers from a TM timeout by
3798  * issuing a controller reset.
3799  *
3800  * Return: 0 on success, non-zero on errors
3801  */
3802 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
3803 	u16 handle, uint lun, u16 htag, ulong timeout,
3804 	struct mpi3mr_drv_cmd *drv_cmd,
3805 	u8 *resp_code, struct scsi_cmnd *scmd)
3806 {
3807 	struct mpi3_scsi_task_mgmt_request tm_req;
3808 	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
3809 	int retval = 0;
3810 	struct mpi3mr_tgt_dev *tgtdev = NULL;
3811 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
3812 	struct scmd_priv *cmd_priv = NULL;
3813 	struct scsi_device *sdev = NULL;
3814 	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3815 
3816 	ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
3817 	     __func__, tm_type, handle);
3818 	if (mrioc->unrecoverable) {
3819 		retval = -1;
3820 		ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
3821 		    __func__);
3822 		goto out;
3823 	}
3824 
3825 	memset(&tm_req, 0, sizeof(tm_req));
3826 	mutex_lock(&drv_cmd->mutex);
3827 	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
3828 		retval = -1;
3829 		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
3830 		mutex_unlock(&drv_cmd->mutex);
3831 		goto out;
3832 	}
3833 	if (mrioc->reset_in_progress) {
3834 		retval = -1;
3835 		ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
3836 		mutex_unlock(&drv_cmd->mutex);
3837 		goto out;
3838 	}
3839 	if (mrioc->block_on_pci_err) {
3840 		retval = -1;
3841 		dprint_tm(mrioc,
3842 		    "sending task management failed due to pci error recovery in progress\n");
3843 		mutex_unlock(&drv_cmd->mutex);
3844 		goto out;
3845 	}
3846 
3847 	drv_cmd->state = MPI3MR_CMD_PENDING;
3848 	drv_cmd->is_waiting = 1;
3849 	drv_cmd->callback = NULL;
3850 	tm_req.dev_handle = cpu_to_le16(handle);
3851 	tm_req.task_type = tm_type;
3852 	tm_req.host_tag = cpu_to_le16(htag);
3853 
3854 	int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
3855 	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
3856 
3857 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
3858 
3859 	if (scmd) {
3860 		sdev = scmd->device;
3861 		sdev_priv_data = sdev->hostdata;
3862 		scsi_tgt_priv_data = ((sdev_priv_data) ?
3863 		    sdev_priv_data->tgt_priv_data : NULL);
3864 	} else {
3865 		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
3866 			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
3867 			    tgtdev->starget->hostdata;
3868 	}
3869 
3870 	if (scsi_tgt_priv_data)
3871 		atomic_inc(&scsi_tgt_priv_data->block_io);
3872 
3873 	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
3874 		if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
3875 			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
3876 		else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
3877 			timeout = tgtdev->dev_spec.pcie_inf.reset_to;
3878 	}
3879 
3880 	init_completion(&drv_cmd->done);
3881 	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
3882 	if (retval) {
3883 		ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
3884 		goto out_unlock;
3885 	}
3886 	wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
3887 
3888 	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
3889 		drv_cmd->is_waiting = 0;
3890 		retval = -1;
3891 		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
3892 			dprint_tm(mrioc,
3893 			    "task management request timed out after %ld seconds\n",
3894 			    timeout);
3895 			if (mrioc->logging_level & MPI3_DEBUG_TM)
3896 				dprint_dump_req(&tm_req, sizeof(tm_req)/4);
3897 			mpi3mr_soft_reset_handler(mrioc,
3898 			    MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
3899 		}
3900 		goto out_unlock;
3901 	}
3902 
3903 	if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
3904 		dprint_tm(mrioc, "invalid task management reply message\n");
3905 		retval = -1;
3906 		goto out_unlock;
3907 	}
3908 
3909 	tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
3910 
3911 	switch (drv_cmd->ioc_status) {
3912 	case MPI3_IOCSTATUS_SUCCESS:
3913 		*resp_code = le32_to_cpu(tm_reply->response_data) &
3914 			MPI3MR_RI_MASK_RESPCODE;
3915 		break;
3916 	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
3917 		*resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
3918 		break;
3919 	default:
3920 		dprint_tm(mrioc,
3921 		    "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
3922 		    handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
3923 		retval = -1;
3924 		goto out_unlock;
3925 	}
3926 
3927 	switch (*resp_code) {
3928 	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
3929 	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
3930 		break;
3931 	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
3932 		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3933 			retval = -1;
3934 		break;
3935 	default:
3936 		retval = -1;
3937 		break;
3938 	}
3939 
3940 	dprint_tm(mrioc,
3941 	    "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
3942 	    tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
3943 	    le32_to_cpu(tm_reply->termination_count),
3944 	    mpi3mr_tm_response_name(*resp_code), *resp_code);
3945 
3946 	if (!retval) {
3947 		mpi3mr_ioc_disable_intr(mrioc);
3948 		mpi3mr_poll_pend_io_completions(mrioc);
3949 		mpi3mr_ioc_enable_intr(mrioc);
3950 		mpi3mr_poll_pend_io_completions(mrioc);
3951 		mpi3mr_process_admin_reply_q(mrioc);
3952 	}
3953 	switch (tm_type) {
3954 	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3955 		if (!scsi_tgt_priv_data)
3956 			break;
3957 		scsi_tgt_priv_data->pend_count = 0;
3958 		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
3959 		    mpi3mr_count_tgt_pending,
3960 		    (void *)scsi_tgt_priv_data->starget);
3961 		break;
3962 	case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3963 		if (!sdev_priv_data)
3964 			break;
3965 		sdev_priv_data->pend_count = 0;
3966 		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
3967 		    mpi3mr_count_dev_pending, (void *)sdev);
3968 		break;
3969 	default:
3970 		break;
3971 	}
3972 	mpi3mr_global_trigger(mrioc,
3973 	    MPI3_DRIVER2_GLOBALTRIGGER_TASK_MANAGEMENT_ENABLED);
3974 
3975 out_unlock:
3976 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3977 	mutex_unlock(&drv_cmd->mutex);
3978 	if (scsi_tgt_priv_data)
3979 		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
3980 	if (tgtdev)
3981 		mpi3mr_tgtdev_put(tgtdev);
3982 out:
3983 	return retval;
3984 }
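
/*
 * Editorial usage sketch, mirroring the error-handling callbacks later
 * in this file:
 *
 *	u8 resp_code = 0;
 *	int ret;
 *
 *	ret = mpi3mr_issue_tm(mrioc,
 *	    MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
 *	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
 *	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code,
 *	    scmd);
 */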
3985 
3986 /**
3987  * mpi3mr_bios_param - BIOS param callback
3988  * @sdev: SCSI device reference
3989  * @bdev: Block device reference
3990  * @capacity: Capacity in logical sectors
3991  * @params: Parameter array
3992  *
3993  * Provides the geometry parameters: heads/sectors/cylinders.
3994  *
3995  * Return: 0 always
3996  */
3997 static int mpi3mr_bios_param(struct scsi_device *sdev,
3998 	struct block_device *bdev, sector_t capacity, int params[])
3999 {
4000 	int heads;
4001 	int sectors;
4002 	sector_t cylinders;
4003 	ulong dummy;
4004 
4005 	heads = 64;
4006 	sectors = 32;
4007 
4008 	dummy = heads * sectors;
4009 	cylinders = capacity;
4010 	sector_div(cylinders, dummy);
4011 
4012 	if ((ulong)capacity >= 0x200000) {
4013 		heads = 255;
4014 		sectors = 63;
4015 		dummy = heads * sectors;
4016 		cylinders = capacity;
4017 		sector_div(cylinders, dummy);
4018 	}
4019 
4020 	params[0] = heads;
4021 	params[1] = sectors;
4022 	params[2] = cylinders;
4023 	return 0;
4024 }
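
/*
 * Editorial worked example: a 1 TiB disk has 2147483648 512-byte
 * sectors, which is >= 0x200000, so heads = 255 and sectors = 63 and
 * cylinders = 2147483648 / (255 * 63) = 133674 (integer division).
 */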
4025 
4026 /**
4027  * mpi3mr_map_queues - Map queues callback handler
4028  * @shost: SCSI host reference
4029  *
4030  * Maps default and poll queues.
4031  *
4032  * Return: return zero.
4033  */
4034 static void mpi3mr_map_queues(struct Scsi_Host *shost)
4035 {
4036 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4037 	int i, qoff, offset;
4038 	struct blk_mq_queue_map *map = NULL;
4039 
4040 	offset = mrioc->op_reply_q_offset;
4041 
4042 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
4043 		map = &shost->tag_set.map[i];
4044 
4045 		map->nr_queues  = 0;
4046 
4047 		if (i == HCTX_TYPE_DEFAULT)
4048 			map->nr_queues = mrioc->default_qcount;
4049 		else if (i == HCTX_TYPE_POLL)
4050 			map->nr_queues = mrioc->active_poll_qcount;
4051 
4052 		if (!map->nr_queues) {
4053 			BUG_ON(i == HCTX_TYPE_DEFAULT);
4054 			continue;
4055 		}
4056 
4057 		/*
4058 		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
4059 		 * affinity), so use the regular blk-mq cpu mapping
4060 		 */
4061 		map->queue_offset = qoff;
4062 		if (i != HCTX_TYPE_POLL)
4063 			blk_mq_pci_map_queues(map, mrioc->pdev, offset);
4064 		else
4065 			blk_mq_map_queues(map);
4066 
4067 		qoff += map->nr_queues;
4068 		offset += map->nr_queues;
4069 	}
4070 }
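
/*
 * Editorial example (illustrative queue counts): with default_qcount ==
 * 8 and active_poll_qcount == 2, hardware contexts 0-7 map to the
 * IRQ-driven queues via blk_mq_pci_map_queues() using the controller's
 * MSI-X affinity, while the poll contexts 8-9 have no IRQ and fall back
 * to the generic blk_mq_map_queues() CPU spread.
 */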
4071 
4072 /**
4073  * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
4074  * @mrioc: Adapter instance reference
4075  *
4076  * Calculate the pending I/Os for the controller and return.
4077  *
4078  * Return: Number of pending I/Os
4079  */
4080 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
4081 {
4082 	u16 i;
4083 	uint pend_ios = 0;
4084 
4085 	for (i = 0; i < mrioc->num_op_reply_q; i++)
4086 		pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
4087 	return pend_ios;
4088 }
4089 
4090 /**
4091  * mpi3mr_print_pending_host_io - print pending I/Os
4092  * @mrioc: Adapter instance reference
4093  *
4094  * Print the number of pending I/Os and each I/O's details prior
4095  * to reset for debugging purposes.
4096  *
4097  * Return: Nothing
4098  */
4099 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
4100 {
4101 	struct Scsi_Host *shost = mrioc->shost;
4102 
4103 	ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
4104 	    __func__, mpi3mr_get_fw_pending_ios(mrioc));
4105 	blk_mq_tagset_busy_iter(&shost->tag_set,
4106 	    mpi3mr_print_scmd, (void *)mrioc);
4107 }
4108 
4109 /**
4110  * mpi3mr_wait_for_host_io - block for I/Os to complete
4111  * @mrioc: Adapter instance reference
4112  * @timeout: timeout in seconds
4113  * Waits for pending I/Os for the given adapter to complete or
4114  * to hit the timeout.
4115  *
4116  * Return: Nothing
4117  */
4118 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
4119 {
4120 	enum mpi3mr_iocstate iocstate;
4121 	int i = 0;
4122 
4123 	iocstate = mpi3mr_get_iocstate(mrioc);
4124 	if (iocstate != MRIOC_STATE_READY)
4125 		return;
4126 
4127 	if (!mpi3mr_get_fw_pending_ios(mrioc))
4128 		return;
4129 	ioc_info(mrioc,
4130 	    "%s :Waiting for %d seconds prior to reset for %d I/O\n",
4131 	    __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
4132 
4133 	for (i = 0; i < timeout; i++) {
4134 		if (!mpi3mr_get_fw_pending_ios(mrioc))
4135 			break;
4136 		iocstate = mpi3mr_get_iocstate(mrioc);
4137 		if (iocstate != MRIOC_STATE_READY)
4138 			break;
4139 		msleep(1000);
4140 	}
4141 
4142 	ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
4143 	    mpi3mr_get_fw_pending_ios(mrioc));
4144 }
4145 
4146 /**
4147  * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
4148  * @mrioc: Adapter instance reference
4149  * @scmd: SCSI command reference
4150  * @scsiio_req: MPI3 SCSI IO request
4151  * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
4152  * @wslen: write same max length
4153  *
4154  * Gets the unmap, ndob and number of blocks values from a write
4155  * same SCSI I/O and, based on these values, sets the divert I/O
4156  * flag and the reason for diverting the I/O to firmware.
4157  *
4158  * Return: Nothing
4159  */
4160 static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
4161 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
4162 	u32 *scsiio_flags, u16 wslen)
4163 {
4164 	u8 unmap = 0, ndob = 0;
4165 	u8 opcode = scmd->cmnd[0];
4166 	u32 num_blocks = 0;
4167 	u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);
4168 
4169 	if (opcode == WRITE_SAME_16) {
4170 		unmap = scmd->cmnd[1] & 0x08;
4171 		ndob = scmd->cmnd[1] & 0x01;
4172 		num_blocks = get_unaligned_be32(scmd->cmnd + 10);
4173 	} else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
4174 		unmap = scmd->cmnd[10] & 0x08;
4175 		ndob = scmd->cmnd[10] & 0x01;
4176 		num_blocks = get_unaligned_be32(scmd->cmnd + 28);
4177 	} else
4178 		return;
4179 
4180 	if ((unmap) && (ndob) && (num_blocks > wslen)) {
4181 		scsiio_req->msg_flags |=
4182 		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
4183 		*scsiio_flags |=
4184 			MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
4185 	}
4186 }
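
/*
 * Editorial reference for the CDB fields parsed above (per SBC): in
 * WRITE SAME(16), byte 1 bit 3 is UNMAP, byte 1 bit 0 is NDOB and bytes
 * 10-13 hold NUMBER OF LOGICAL BLOCKS (big endian); in WRITE SAME(32)
 * the same bits live in byte 10 and the block count in bytes 28-31. The
 * I/O is diverted to firmware only when UNMAP and NDOB are both set and
 * the block count exceeds the device's write same limit (wslen).
 */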
4187 
4188 /**
4189  * mpi3mr_eh_host_reset - Host reset error handling callback
4190  * @scmd: SCSI command reference
4191  *
4192  * Issue controller reset
4193  *
4194  * Return: SUCCESS on successful reset, else FAILED
4195  */
4196 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
4197 {
4198 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4199 	int retval = FAILED, ret;
4200 
4201 	ret = mpi3mr_soft_reset_handler(mrioc,
4202 	    MPI3MR_RESET_FROM_EH_HOS, 1);
4203 	if (ret)
4204 		goto out;
4205 
4206 	retval = SUCCESS;
4207 out:
4208 	sdev_printk(KERN_INFO, scmd->device,
4209 	    "Host reset is %s for scmd(%p)\n",
4210 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4211 
4212 	return retval;
4213 }
4214 
4215 /**
4216  * mpi3mr_eh_bus_reset - Bus reset error handling callback
4217  * @scmd: SCSI command reference
4218  *
4219  * Checks whether pending I/Os are present for the RAID volume;
4220  * if not, there's no need to reset the adapter.
4221  *
4222  * Return: SUCCESS on successful reset, else FAILED
4223  */
4224 static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
4225 {
4226 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4227 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4228 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4229 	u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
4230 	int retval = FAILED;
4231 	unsigned int timeout = MPI3MR_RESET_TIMEOUT;
4232 
4233 	sdev_priv_data = scmd->device->hostdata;
4234 	if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
4235 		stgt_priv_data = sdev_priv_data->tgt_priv_data;
4236 		dev_type = stgt_priv_data->dev_type;
4237 	}
4238 
4239 	if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
4240 		mpi3mr_wait_for_host_io(mrioc,
4241 			MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
4242 		if (!mpi3mr_get_fw_pending_ios(mrioc)) {
4243 			while (mrioc->reset_in_progress ||
4244 			       mrioc->prepare_for_reset ||
4245 			       mrioc->block_on_pci_err) {
4246 				ssleep(1);
4247 				if (!timeout--) {
4248 					retval = FAILED;
4249 					goto out;
4250 				}
4251 			}
4252 			retval = SUCCESS;
4253 			goto out;
4254 		}
4255 	}
4256 	if (retval == FAILED)
4257 		mpi3mr_print_pending_host_io(mrioc);
4258 
4259 out:
4260 	sdev_printk(KERN_INFO, scmd->device,
4261 		"Bus reset is %s for scmd(%p)\n",
4262 		((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4263 	return retval;
4264 }
4265 
4266 /**
4267  * mpi3mr_eh_target_reset - Target reset error handling callback
4268  * @scmd: SCSI command reference
4269  *
4270  * Issue Target reset Task Management and verify the scmd is
4271  * terminated successfully and return status accordingly.
4272  *
4273  * Return: SUCCESS on successful termination of the scmd, else
4274  *         FAILED
4275  */
4276 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
4277 {
4278 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4279 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4280 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4281 	u16 dev_handle;
4282 	u8 resp_code = 0;
4283 	int retval = FAILED, ret = 0;
4284 
4285 	sdev_printk(KERN_INFO, scmd->device,
4286 	    "Attempting Target Reset! scmd(%p)\n", scmd);
4287 	scsi_print_command(scmd);
4288 
4289 	sdev_priv_data = scmd->device->hostdata;
4290 	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4291 		sdev_printk(KERN_INFO, scmd->device,
4292 		    "SCSI device is not available\n");
4293 		retval = SUCCESS;
4294 		goto out;
4295 	}
4296 
4297 	stgt_priv_data = sdev_priv_data->tgt_priv_data;
4298 	dev_handle = stgt_priv_data->dev_handle;
4299 	if (stgt_priv_data->dev_removed) {
4300 		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4301 		sdev_printk(KERN_INFO, scmd->device,
4302 		    "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
4303 		    mrioc->name, dev_handle);
4304 		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4305 			retval = SUCCESS;
4306 		else
4307 			retval = FAILED;
4308 		goto out;
4309 	}
4310 	sdev_printk(KERN_INFO, scmd->device,
4311 	    "Target Reset is issued to handle(0x%04x)\n",
4312 	    dev_handle);
4313 
4314 	ret = mpi3mr_issue_tm(mrioc,
4315 	    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
4316 	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4317 	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4318 
4319 	if (ret)
4320 		goto out;
4321 
4322 	if (stgt_priv_data->pend_count) {
4323 		sdev_printk(KERN_INFO, scmd->device,
4324 		    "%s: target has %d pending commands, target reset is failed\n",
4325 		    mrioc->name, stgt_priv_data->pend_count);
4326 		goto out;
4327 	}
4328 
4329 	retval = SUCCESS;
4330 out:
4331 	sdev_printk(KERN_INFO, scmd->device,
4332 	    "%s: target reset is %s for scmd(%p)\n", mrioc->name,
4333 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4334 
4335 	return retval;
4336 }
4337 
4338 /**
4339  * mpi3mr_eh_dev_reset - Device reset error handling callback
4340  * @scmd: SCSI command reference
4341  *
4342  * Issue lun reset Task Management and verify the scmd is
4343  * terminated successfully and return status accordingly.
4344  *
4345  * Return: SUCCESS on successful termination of the scmd, else
4346  *         FAILED
4347  */
4348 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
4349 {
4350 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4351 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4352 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4353 	u16 dev_handle;
4354 	u8 resp_code = 0;
4355 	int retval = FAILED, ret = 0;
4356 
4357 	sdev_printk(KERN_INFO, scmd->device,
4358 	    "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
4359 	scsi_print_command(scmd);
4360 
4361 	sdev_priv_data = scmd->device->hostdata;
4362 	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4363 		sdev_printk(KERN_INFO, scmd->device,
4364 		    "SCSI device is not available\n");
4365 		retval = SUCCESS;
4366 		goto out;
4367 	}
4368 
4369 	stgt_priv_data = sdev_priv_data->tgt_priv_data;
4370 	dev_handle = stgt_priv_data->dev_handle;
4371 	if (stgt_priv_data->dev_removed) {
4372 		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4373 		sdev_printk(KERN_INFO, scmd->device,
4374 		    "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
4375 		    mrioc->name, dev_handle);
4376 		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4377 			retval = SUCCESS;
4378 		else
4379 			retval = FAILED;
4380 		goto out;
4381 	}
4382 	sdev_printk(KERN_INFO, scmd->device,
4383 	    "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
4384 
4385 	ret = mpi3mr_issue_tm(mrioc,
4386 	    MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
4387 	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4388 	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4389 
4390 	if (ret)
4391 		goto out;
4392 
4393 	if (sdev_priv_data->pend_count) {
4394 		sdev_printk(KERN_INFO, scmd->device,
4395 		    "%s: device has %d pending commands, device(LUN) reset is failed\n",
4396 		    mrioc->name, sdev_priv_data->pend_count);
4397 		goto out;
4398 	}
4399 	retval = SUCCESS;
4400 out:
4401 	sdev_printk(KERN_INFO, scmd->device,
4402 	    "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
4403 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4404 
4405 	return retval;
4406 }
4407 
4408 /**
4409  * mpi3mr_scan_start - Scan start callback handler
4410  * @shost: SCSI host reference
4411  *
4412  * Issue port enable request asynchronously.
4413  *
4414  * Return: Nothing
4415  */
4416 static void mpi3mr_scan_start(struct Scsi_Host *shost)
4417 {
4418 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4419 
4420 	mrioc->scan_started = 1;
4421 	ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
4422 	if (mpi3mr_issue_port_enable(mrioc, 1)) {
4423 		ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
4424 		mrioc->scan_started = 0;
4425 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4426 	}
4427 }
4428 
4429 /**
4430  * mpi3mr_scan_finished - Scan finished callback handler
4431  * @shost: SCSI host reference
4432  * @time: Jiffies from the scan start
4433  *
4434  * Checks whether the port enable has completed, timed out or
4435  * failed, and sets the scan status accordingly after taking any
4436  * recovery action if required.
4437  *
4438  * Return: 1 on scan finished or timed out, 0 for in progress
4439  */
4440 static int mpi3mr_scan_finished(struct Scsi_Host *shost,
4441 	unsigned long time)
4442 {
4443 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4444 	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
4445 	u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4446 
4447 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4448 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4449 		ioc_err(mrioc, "port enable failed due to fault or reset\n");
4450 		mpi3mr_print_fault_info(mrioc);
4451 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4452 		mrioc->scan_started = 0;
4453 		mrioc->init_cmds.is_waiting = 0;
4454 		mrioc->init_cmds.callback = NULL;
4455 		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4456 	}
4457 
4458 	if (time >= (pe_timeout * HZ)) {
4459 		ioc_err(mrioc, "port enable failed due to time out\n");
4460 		mpi3mr_check_rh_fault_ioc(mrioc,
4461 		    MPI3MR_RESET_FROM_PE_TIMEOUT);
4462 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4463 		mrioc->scan_started = 0;
4464 		mrioc->init_cmds.is_waiting = 0;
4465 		mrioc->init_cmds.callback = NULL;
4466 		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4467 	}
4468 
4469 	if (mrioc->scan_started)
4470 		return 0;
4471 
4472 	if (mrioc->scan_failed) {
4473 		ioc_err(mrioc,
4474 		    "port enable failed with status=0x%04x\n",
4475 		    mrioc->scan_failed);
4476 	} else
4477 		ioc_info(mrioc, "port enable is successfully completed\n");
4478 
4479 	mpi3mr_start_watchdog(mrioc);
4480 	mrioc->is_driver_loading = 0;
4481 	mrioc->stop_bsgs = 0;
4482 	return 1;
4483 }
4484 
4485 /**
4486  * mpi3mr_slave_destroy - Slave destroy callback handler
4487  * @sdev: SCSI device reference
4488  *
4489  * Clean up and free per-device (LUN) private data.
4490  *
4491  * Return: Nothing.
4492  */
4493 static void mpi3mr_slave_destroy(struct scsi_device *sdev)
4494 {
4495 	struct Scsi_Host *shost;
4496 	struct mpi3mr_ioc *mrioc;
4497 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4498 	struct mpi3mr_tgt_dev *tgt_dev = NULL;
4499 	unsigned long flags;
4500 	struct scsi_target *starget;
4501 	struct sas_rphy *rphy = NULL;
4502 
4503 	if (!sdev->hostdata)
4504 		return;
4505 
4506 	starget = scsi_target(sdev);
4507 	shost = dev_to_shost(&starget->dev);
4508 	mrioc = shost_priv(shost);
4509 	scsi_tgt_priv_data = starget->hostdata;
4510 
4511 	scsi_tgt_priv_data->num_luns--;
4512 
4513 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4514 	if (starget->channel == mrioc->scsi_device_channel)
4515 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4516 	else if (mrioc->sas_transport_enabled && !starget->channel) {
4517 		rphy = dev_to_rphy(starget->dev.parent);
4518 		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4519 		    rphy->identify.sas_address, rphy);
4520 	}
4521 
4522 	if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
4523 		tgt_dev->starget = NULL;
4524 	if (tgt_dev)
4525 		mpi3mr_tgtdev_put(tgt_dev);
4526 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4527 
4528 	kfree(sdev->hostdata);
4529 	sdev->hostdata = NULL;
4530 }
4531 
4532 /**
4533  * mpi3mr_target_destroy - Target destroy callback handler
4534  * @starget: SCSI target reference
4535  *
4536  * Clean up and free per-target private data.
4537  *
4538  * Return: Nothing.
4539  */
4540 static void mpi3mr_target_destroy(struct scsi_target *starget)
4541 {
4542 	struct Scsi_Host *shost;
4543 	struct mpi3mr_ioc *mrioc;
4544 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4545 	struct mpi3mr_tgt_dev *tgt_dev;
4546 	unsigned long flags;
4547 
4548 	if (!starget->hostdata)
4549 		return;
4550 
4551 	shost = dev_to_shost(&starget->dev);
4552 	mrioc = shost_priv(shost);
4553 	scsi_tgt_priv_data = starget->hostdata;
4554 
4555 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4556 	tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
4557 	if (tgt_dev && (tgt_dev->starget == starget) &&
4558 	    (tgt_dev->perst_id == starget->id))
4559 		tgt_dev->starget = NULL;
4560 	if (tgt_dev) {
4561 		scsi_tgt_priv_data->tgt_dev = NULL;
4562 		scsi_tgt_priv_data->perst_id = 0;
4563 		mpi3mr_tgtdev_put(tgt_dev);
4564 		mpi3mr_tgtdev_put(tgt_dev);
4565 	}
4566 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4567 
4568 	kfree(starget->hostdata);
4569 	starget->hostdata = NULL;
4570 }
4571 
4572 /**
4573  * mpi3mr_device_configure - Device configure callback handler
4574  * @sdev: SCSI device reference
4575  * @lim: queue limits
4576  *
4577  * Configure queue depth, max hardware sectors and virt boundary
4578  * as required.
4579  *
4580  * Return: 0 always.
4581  */
4582 static int mpi3mr_device_configure(struct scsi_device *sdev,
4583 		struct queue_limits *lim)
4584 {
4585 	struct scsi_target *starget;
4586 	struct Scsi_Host *shost;
4587 	struct mpi3mr_ioc *mrioc;
4588 	struct mpi3mr_tgt_dev *tgt_dev = NULL;
4589 	unsigned long flags;
4590 	int retval = 0;
4591 	struct sas_rphy *rphy = NULL;
4592 
4593 	starget = scsi_target(sdev);
4594 	shost = dev_to_shost(&starget->dev);
4595 	mrioc = shost_priv(shost);
4596 
4597 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4598 	if (starget->channel == mrioc->scsi_device_channel)
4599 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4600 	else if (mrioc->sas_transport_enabled && !starget->channel) {
4601 		rphy = dev_to_rphy(starget->dev.parent);
4602 		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4603 		    rphy->identify.sas_address, rphy);
4604 	}
4605 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4606 	if (!tgt_dev)
4607 		return -ENXIO;
4608 
4609 	mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
4610 
4611 	sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
4612 	blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
4613 
4614 	mpi3mr_configure_tgt_dev(tgt_dev, lim);
4615 	mpi3mr_tgtdev_put(tgt_dev);
4616 	return retval;
4617 }
4618 
4619 /**
4620  * mpi3mr_slave_alloc - Slave alloc callback handler
4621  * @sdev: SCSI device reference
4622  *
4623  * Allocate per-device (LUN) private data and initialize it.
4624  *
4625  * Return: 0 on success -ENOMEM on memory allocation failure.
4626  */
4627 static int mpi3mr_slave_alloc(struct scsi_device *sdev)
4628 {
4629 	struct Scsi_Host *shost;
4630 	struct mpi3mr_ioc *mrioc;
4631 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4632 	struct mpi3mr_tgt_dev *tgt_dev = NULL;
4633 	struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
4634 	unsigned long flags;
4635 	struct scsi_target *starget;
4636 	int retval = 0;
4637 	struct sas_rphy *rphy = NULL;
4638 
4639 	starget = scsi_target(sdev);
4640 	shost = dev_to_shost(&starget->dev);
4641 	mrioc = shost_priv(shost);
4642 	scsi_tgt_priv_data = starget->hostdata;
4643 
4644 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4645 
4646 	if (starget->channel == mrioc->scsi_device_channel)
4647 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4648 	else if (mrioc->sas_transport_enabled && !starget->channel) {
4649 		rphy = dev_to_rphy(starget->dev.parent);
4650 		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4651 		    rphy->identify.sas_address, rphy);
4652 	}
4653 
4654 	if (tgt_dev) {
4655 		if (tgt_dev->starget == NULL)
4656 			tgt_dev->starget = starget;
4657 		mpi3mr_tgtdev_put(tgt_dev);
4658 		retval = 0;
4659 	} else {
4660 		spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4661 		return -ENXIO;
4662 	}
4663 
4664 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4665 
4666 	scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
4667 	if (!scsi_dev_priv_data)
4668 		return -ENOMEM;
4669 
4670 	scsi_dev_priv_data->lun_id = sdev->lun;
4671 	scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
4672 	sdev->hostdata = scsi_dev_priv_data;
4673 
4674 	scsi_tgt_priv_data->num_luns++;
4675 
4676 	return retval;
4677 }
4678 
4679 /**
4680  * mpi3mr_target_alloc - Target alloc callback handler
4681  * @starget: SCSI target reference
4682  *
4683  * Allocate per target private data and initialize it.
4684  *
4685  * Return: 0 on success -ENOMEM on memory allocation failure.
4686  */
4687 static int mpi3mr_target_alloc(struct scsi_target *starget)
4688 {
4689 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4690 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4691 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4692 	struct mpi3mr_tgt_dev *tgt_dev;
4693 	unsigned long flags;
4694 	int retval = 0;
4695 	struct sas_rphy *rphy = NULL;
4696 
4697 	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
4698 	if (!scsi_tgt_priv_data)
4699 		return -ENOMEM;
4700 
4701 	starget->hostdata = scsi_tgt_priv_data;
4702 
4703 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4704 	if (starget->channel == mrioc->scsi_device_channel) {
4705 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4706 		if (tgt_dev && !tgt_dev->is_hidden) {
4707 			scsi_tgt_priv_data->starget = starget;
4708 			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4709 			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4710 			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4711 			scsi_tgt_priv_data->tgt_dev = tgt_dev;
4712 			tgt_dev->starget = starget;
4713 			atomic_set(&scsi_tgt_priv_data->block_io, 0);
4714 			retval = 0;
4715 			if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
4716 			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
4717 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
4718 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
4719 			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
4720 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
4721 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
4722 				scsi_tgt_priv_data->dev_nvme_dif = 1;
4723 			scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4724 			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4725 			if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
4726 				scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
4727 		} else
4728 			retval = -ENXIO;
4729 	} else if (mrioc->sas_transport_enabled && !starget->channel) {
4730 		rphy = dev_to_rphy(starget->dev.parent);
4731 		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4732 		    rphy->identify.sas_address, rphy);
4733 		if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
4734 		    (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
4735 			scsi_tgt_priv_data->starget = starget;
4736 			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4737 			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4738 			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4739 			scsi_tgt_priv_data->tgt_dev = tgt_dev;
4740 			scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4741 			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4742 			tgt_dev->starget = starget;
4743 			atomic_set(&scsi_tgt_priv_data->block_io, 0);
4744 			retval = 0;
4745 		} else
4746 			retval = -ENXIO;
4747 	}
4748 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4749 
4750 	return retval;
4751 }
4752 
4753 /**
4754  * mpi3mr_check_return_unmap - Whether an unmap is allowed
4755  * @mrioc: Adapter instance reference
4756  * @scmd: SCSI Command reference
4757  *
4758  * The controller hardware cannot handle certain unmap commands
4759  * for NVMe drives; this routine checks for those, returns true
4760  * and completes the SCSI command with the proper status and
4761  * sense data.
4762  *
4763  * Return: TRUE if the unmap is not allowed, FALSE otherwise.
4764  */
4765 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
4766 	struct scsi_cmnd *scmd)
4767 {
4768 	unsigned char *buf;
4769 	u16 param_len, desc_len, trunc_param_len;
4770 
4771 	trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
4772 
4773 	if (mrioc->pdev->revision) {
4774 		if ((param_len > 24) && ((param_len - 8) & 0xF)) {
4775 			trunc_param_len -= (param_len - 8) & 0xF;
4776 			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
4777 			dprint_scsi_err(mrioc,
4778 			    "truncating param_len from (%d) to (%d)\n",
4779 			    param_len, trunc_param_len);
4780 			put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
4781 			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
4782 		}
4783 		return false;
4784 	}
4785 
4786 	if (!param_len) {
4787 		ioc_warn(mrioc,
4788 		    "%s: cdb received with zero parameter length\n",
4789 		    __func__);
4790 		scsi_print_command(scmd);
4791 		scmd->result = DID_OK << 16;
4792 		scsi_done(scmd);
4793 		return true;
4794 	}
4795 
4796 	if (param_len < 24) {
4797 		ioc_warn(mrioc,
4798 		    "%s: cdb received with invalid param_len: %d\n",
4799 		    __func__, param_len);
4800 		scsi_print_command(scmd);
4801 		scmd->result = SAM_STAT_CHECK_CONDITION;
4802 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4803 		    0x1A, 0);
4804 		scsi_done(scmd);
4805 		return true;
4806 	}
4807 	if (param_len != scsi_bufflen(scmd)) {
4808 		ioc_warn(mrioc,
4809 		    "%s: cdb received with param_len: %d bufflen: %d\n",
4810 		    __func__, param_len, scsi_bufflen(scmd));
4811 		scsi_print_command(scmd);
4812 		scmd->result = SAM_STAT_CHECK_CONDITION;
4813 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4814 		    0x1A, 0);
4815 		scsi_done(scmd);
4816 		return true;
4817 	}
4818 	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
4819 	if (!buf) {
4820 		scsi_print_command(scmd);
4821 		scmd->result = SAM_STAT_CHECK_CONDITION;
4822 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4823 		    0x55, 0x03);
4824 		scsi_done(scmd);
4825 		return true;
4826 	}
4827 	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
4828 	desc_len = get_unaligned_be16(&buf[2]);
4829 
4830 	if (desc_len < 16) {
4831 		ioc_warn(mrioc,
4832 		    "%s: Invalid descriptor length in param list: %d\n",
4833 		    __func__, desc_len);
4834 		scsi_print_command(scmd);
4835 		scmd->result = SAM_STAT_CHECK_CONDITION;
4836 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4837 		    0x26, 0);
4838 		scsi_done(scmd);
4839 		kfree(buf);
4840 		return true;
4841 	}
4842 
4843 	if (param_len > (desc_len + 8)) {
4844 		trunc_param_len = desc_len + 8;
4845 		scsi_print_command(scmd);
4846 		dprint_scsi_err(mrioc,
4847 		    "truncating param_len(%d) to desc_len+8(%d)\n",
4848 		    param_len, trunc_param_len);
4849 		put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
4850 		scsi_print_command(scmd);
4851 	}
4852 
4853 	kfree(buf);
4854 	return false;
4855 }
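
/*
 * Illustrative sketch (not part of the driver): how the truncation in
 * mpi3mr_check_return_unmap() behaves for a hypothetical UNMAP CDB on
 * a controller with pdev->revision != 0. The parameter list length
 * lives in CDB bytes 7-8; anything beyond a 16-byte multiple past the
 * 8-byte parameter list header is dropped:
 *
 *	param_len = 30;				   (8-byte header + 22)
 *	trunc     = 30 - ((30 - 8) & 0xF);	   (30 - 6)
 *	trunc == 24: header plus one 16-byte block descriptor
 *
 * On the revision-0 SAS4116 path, param_len must instead match
 * scsi_bufflen() exactly and carry at least one complete descriptor,
 * or the command is completed with ILLEGAL_REQUEST sense data.
 */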
4856 
4857 /**
4858  * mpi3mr_allow_scmd_to_fw - Whether a command is allowed during shutdown
4859  * @scmd: SCSI Command reference
4860  *
4861  * Checks whether a CDB is allowed to be issued during shutdown.
4862  *
4863  * Return: TRUE for allowed commands, FALSE otherwise.
4864  */
4865 
4866 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
4867 {
4868 	switch (scmd->cmnd[0]) {
4869 	case SYNCHRONIZE_CACHE:
4870 	case START_STOP:
4871 		return true;
4872 	default:
4873 		return false;
4874 	}
4875 }
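
/*
 * Note (inferred from the opcodes above): SYNCHRONIZE_CACHE (0x35) and
 * START_STOP (0x1b) are still allowed so that cache-flush and
 * spin-down requests issued during an orderly shutdown can reach the
 * firmware; everything else is completed with DID_NO_CONNECT in
 * mpi3mr_qcmd() while stop_drv_processing is set.
 */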
4876 
4877 /**
4878  * mpi3mr_qcmd - I/O request dispatcher
4879  * @shost: SCSI Host reference
4880  * @scmd: SCSI Command reference
4881  *
4882  * Issues the SCSI Command as an MPI3 request.
4883  *
4884  * Return: 0 on successful queueing of the request or if the
4885  *         request is completed with failure.
4886  *         SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
4887  *         SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
4888  */
4889 static int mpi3mr_qcmd(struct Scsi_Host *shost,
4890 	struct scsi_cmnd *scmd)
4891 {
4892 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4893 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4894 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4895 	struct scmd_priv *scmd_priv_data = NULL;
4896 	struct mpi3_scsi_io_request *scsiio_req = NULL;
4897 	struct op_req_qinfo *op_req_q = NULL;
4898 	int retval = 0;
4899 	u16 dev_handle;
4900 	u16 host_tag;
4901 	u32 scsiio_flags = 0, data_len_blks = 0;
4902 	struct request *rq = scsi_cmd_to_rq(scmd);
4903 	int iprio_class;
4904 	u8 is_pcie_dev = 0;
4905 	u32 tracked_io_sz = 0;
4906 	u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
4907 	struct mpi3mr_throttle_group_info *tg = NULL;
4908 
4909 	if (mrioc->unrecoverable) {
4910 		scmd->result = DID_ERROR << 16;
4911 		scsi_done(scmd);
4912 		goto out;
4913 	}
4914 
4915 	sdev_priv_data = scmd->device->hostdata;
4916 	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4917 		scmd->result = DID_NO_CONNECT << 16;
4918 		scsi_done(scmd);
4919 		goto out;
4920 	}
4921 
4922 	if (mrioc->stop_drv_processing &&
4923 	    !(mpi3mr_allow_scmd_to_fw(scmd))) {
4924 		scmd->result = DID_NO_CONNECT << 16;
4925 		scsi_done(scmd);
4926 		goto out;
4927 	}
4928 
4929 	stgt_priv_data = sdev_priv_data->tgt_priv_data;
4930 	dev_handle = stgt_priv_data->dev_handle;
4931 
4932 	/* Avoid error handling escalation when device is removed or blocked */
4933 
4934 	if (scmd->device->host->shost_state == SHOST_RECOVERY &&
4935 		scmd->cmnd[0] == TEST_UNIT_READY &&
4936 		(stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) {
4937 		scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
4938 		scsi_done(scmd);
4939 		goto out;
4940 	}
4941 
4942 	if (mrioc->reset_in_progress || mrioc->prepare_for_reset
4943 	    || mrioc->block_on_pci_err) {
4944 		retval = SCSI_MLQUEUE_HOST_BUSY;
4945 		goto out;
4946 	}
4947 
4948 	if (atomic_read(&stgt_priv_data->block_io)) {
4949 		if (mrioc->stop_drv_processing) {
4950 			scmd->result = DID_NO_CONNECT << 16;
4951 			scsi_done(scmd);
4952 			goto out;
4953 		}
4954 		retval = SCSI_MLQUEUE_DEVICE_BUSY;
4955 		goto out;
4956 	}
4957 
4958 	if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
4959 		scmd->result = DID_NO_CONNECT << 16;
4960 		scsi_done(scmd);
4961 		goto out;
4962 	}
4963 	if (stgt_priv_data->dev_removed) {
4964 		scmd->result = DID_NO_CONNECT << 16;
4965 		scsi_done(scmd);
4966 		goto out;
4967 	}
4968 
4969 	if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
4970 		is_pcie_dev = 1;
4971 	if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
4972 	    (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
4973 	    mpi3mr_check_return_unmap(mrioc, scmd))
4974 		goto out;
4975 
4976 	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
4977 	if (host_tag == MPI3MR_HOSTTAG_INVALID) {
4978 		scmd->result = DID_ERROR << 16;
4979 		scsi_done(scmd);
4980 		goto out;
4981 	}
4982 
4983 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4984 		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
4985 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4986 		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
4987 	else
4988 		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
4989 
4990 	scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
4991 
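	/*
	 * NCQ priority: when enabled on the device, a realtime I/O
	 * priority class on the request maps to the MPI3 command
	 * priority field so the drive can expedite it.
	 */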
4992 	if (sdev_priv_data->ncq_prio_enable) {
4993 		iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4994 		if (iprio_class == IOPRIO_CLASS_RT)
4995 			scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
4996 	}
4997 
4998 	if (scmd->cmd_len > 16)
4999 		scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
5000 
5001 	scmd_priv_data = scsi_cmd_priv(scmd);
5002 	memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
5003 	scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
5004 	scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
5005 	scsiio_req->host_tag = cpu_to_le16(host_tag);
5006 
5007 	mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
5008 
5009 	if (stgt_priv_data->wslen)
5010 		mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags,
5011 		    stgt_priv_data->wslen);
5012 
5013 	memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
5014 	scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
5015 	scsiio_req->dev_handle = cpu_to_le16(dev_handle);
5016 	scsiio_req->flags = cpu_to_le32(scsiio_flags);
5017 	int_to_scsilun(sdev_priv_data->lun_id,
5018 	    (struct scsi_lun *)scsiio_req->lun);
5019 
5020 	if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
5021 		mpi3mr_clear_scmd_priv(mrioc, scmd);
5022 		retval = SCSI_MLQUEUE_HOST_BUSY;
5023 		goto out;
5024 	}
5025 	op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
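	/*
	 * I/O throttling: express the transfer length in 512-byte blocks
	 * (e.g. a 1 MiB request is 2048 blocks); once the per-IOC or
	 * per-throttle-group pending total crosses its high watermark,
	 * divert subsequent I/O to the firmware and, for throttle groups,
	 * queue a queue-depth reduction event.
	 */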
5026 	data_len_blks = scsi_bufflen(scmd) >> 9;
5027 	if ((data_len_blks >= mrioc->io_throttle_data_length) &&
5028 	    stgt_priv_data->io_throttle_enabled) {
5029 		tracked_io_sz = data_len_blks;
5030 		tg = stgt_priv_data->throttle_group;
5031 		if (tg) {
5032 			ioc_pend_data_len = atomic_add_return(data_len_blks,
5033 			    &mrioc->pend_large_data_sz);
5034 			tg_pend_data_len = atomic_add_return(data_len_blks,
5035 			    &tg->pend_large_data_sz);
5036 			if (!tg->io_divert && ((ioc_pend_data_len >=
5037 			    mrioc->io_throttle_high) ||
5038 			    (tg_pend_data_len >= tg->high))) {
5039 				tg->io_divert = 1;
5040 				tg->need_qd_reduction = 1;
5041 				mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
5042 				    tg, 1);
5043 				mpi3mr_queue_qd_reduction_event(mrioc, tg);
5044 			}
5045 		} else {
5046 			ioc_pend_data_len = atomic_add_return(data_len_blks,
5047 			    &mrioc->pend_large_data_sz);
5048 			if (ioc_pend_data_len >= mrioc->io_throttle_high)
5049 				stgt_priv_data->io_divert = 1;
5050 		}
5051 	}
5052 
5053 	if (stgt_priv_data->io_divert) {
5054 		scsiio_req->msg_flags |=
5055 		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
5056 		scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
5057 	}
5058 	scsiio_req->flags |= cpu_to_le32(scsiio_flags);
5059 
5060 	if (mpi3mr_op_request_post(mrioc, op_req_q,
5061 	    scmd_priv_data->mpi3mr_scsiio_req)) {
5062 		mpi3mr_clear_scmd_priv(mrioc, scmd);
5063 		retval = SCSI_MLQUEUE_HOST_BUSY;
5064 		if (tracked_io_sz) {
5065 			atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
5066 			if (tg)
5067 				atomic_sub(tracked_io_sz,
5068 				    &tg->pend_large_data_sz);
5069 		}
5070 		goto out;
5071 	}
5072 
5073 out:
5074 	return retval;
5075 }
5076 
5077 static const struct scsi_host_template mpi3mr_driver_template = {
5078 	.module				= THIS_MODULE,
5079 	.name				= "MPI3 Storage Controller",
5080 	.proc_name			= MPI3MR_DRIVER_NAME,
5081 	.queuecommand			= mpi3mr_qcmd,
5082 	.target_alloc			= mpi3mr_target_alloc,
5083 	.slave_alloc			= mpi3mr_slave_alloc,
5084 	.device_configure		= mpi3mr_device_configure,
5085 	.target_destroy			= mpi3mr_target_destroy,
5086 	.slave_destroy			= mpi3mr_slave_destroy,
5087 	.scan_finished			= mpi3mr_scan_finished,
5088 	.scan_start			= mpi3mr_scan_start,
5089 	.change_queue_depth		= mpi3mr_change_queue_depth,
5090 	.eh_device_reset_handler	= mpi3mr_eh_dev_reset,
5091 	.eh_target_reset_handler	= mpi3mr_eh_target_reset,
5092 	.eh_bus_reset_handler		= mpi3mr_eh_bus_reset,
5093 	.eh_host_reset_handler		= mpi3mr_eh_host_reset,
5094 	.bios_param			= mpi3mr_bios_param,
5095 	.map_queues			= mpi3mr_map_queues,
5096 	.mq_poll                        = mpi3mr_blk_mq_poll,
5097 	.no_write_same			= 1,
5098 	.can_queue			= 1,
5099 	.this_id			= -1,
5100 	.sg_tablesize			= MPI3MR_DEFAULT_SGL_ENTRIES,
5101 	/* max transfer supported is 1M (2K sectors of 512 bytes)
5102 	 */
5103 	.max_sectors			= (MPI3MR_DEFAULT_MAX_IO_SIZE / 512),
5104 	.cmd_per_lun			= MPI3MR_MAX_CMDS_LUN,
5105 	.max_segment_size		= 0xffffffff,
5106 	.track_queue_depth		= 1,
5107 	.cmd_size			= sizeof(struct scmd_priv),
5108 	.shost_groups			= mpi3mr_host_groups,
5109 	.sdev_groups			= mpi3mr_dev_groups,
5110 };
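
/*
 * Note: .can_queue and .sg_tablesize above are conservative
 * placeholders; mpi3mr_probe() overwrites them on the allocated shost
 * with the controller-reported limits (max_host_ios and
 * max_sgl_entries) before calling scsi_add_host().
 */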
5111 
5112 /**
5113  * mpi3mr_init_drv_cmd - Initialize internal command tracker
5114  * @cmdptr: Internal command tracker
5115  * @host_tag: Host tag used for the specific command
5116  *
5117  * Initialize the internal command tracker structure with
5118  * specified host tag.
5119  *
5120  * Return: Nothing.
5121  */
5122 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
5123 	u16 host_tag)
5124 {
5125 	mutex_init(&cmdptr->mutex);
5126 	cmdptr->reply = NULL;
5127 	cmdptr->state = MPI3MR_CMD_NOTUSED;
5128 	cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
5129 	cmdptr->host_tag = host_tag;
5130 }
5131 
5132 /**
5133  * osintfc_mrioc_security_status - Check controller secure status
5134  * @pdev: PCI device instance
5135  *
5136  * Read the Device Serial Number capability from PCI config
5137  * space and decide whether the controller is secure or not.
5138  *
5139  * Return: 0 on success, non-zero on failure.
5140  */
5141 static int
5142 osintfc_mrioc_security_status(struct pci_dev *pdev)
5143 {
5144 	u32 cap_data;
5145 	int base;
5146 	u32 ctlr_status;
5147 	u32 debug_status;
5148 	int retval = 0;
5149 
5150 	base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
5151 	if (!base) {
5152 		dev_err(&pdev->dev,
5153 		    "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
5154 		return -1;
5155 	}
5156 
5157 	pci_read_config_dword(pdev, base + 4, &cap_data);
5158 
5159 	debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
5160 	ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
5161 
5162 	switch (ctlr_status) {
5163 	case MPI3MR_INVALID_DEVICE:
5164 		dev_err(&pdev->dev,
5165 		    "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
5166 		    __func__, pdev->device, pdev->subsystem_vendor,
5167 		    pdev->subsystem_device);
5168 		retval = -1;
5169 		break;
5170 	case MPI3MR_CONFIG_SECURE_DEVICE:
5171 		if (!debug_status)
5172 			dev_info(&pdev->dev,
5173 			    "%s: Config secure ctlr is detected\n",
5174 			    __func__);
5175 		break;
5176 	case MPI3MR_HARD_SECURE_DEVICE:
5177 		break;
5178 	case MPI3MR_TAMPERED_DEVICE:
5179 		dev_err(&pdev->dev,
5180 		    "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
5181 		    __func__, pdev->device, pdev->subsystem_vendor,
5182 		    pdev->subsystem_device);
5183 		retval = -1;
5184 		break;
5185 	default:
5186 		retval = -1;
5187 		break;
5188 	}
5189 
5190 	if (!retval && debug_status) {
5191 		dev_err(&pdev->dev,
5192 		    "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
5193 		    __func__, pdev->device, pdev->subsystem_vendor,
5194 		    pdev->subsystem_device);
5195 		retval = -1;
5196 	}
5197 
5198 	return retval;
5199 }
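
/*
 * Security status decision summary (derived from the checks above):
 *
 *	ctlr_status		debug_status	result
 *	-----------		------------	------
 *	INVALID_DEVICE		any		-1 (probe fails)
 *	TAMPERED_DEVICE		any		-1 (probe fails)
 *	CONFIG_SECURE_DEVICE	clear		0
 *	HARD_SECURE_DEVICE	clear		0
 *	config/hard secure	set		-1 (probe fails)
 *	anything else		any		-1 (probe fails)
 */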
5200 
5201 /**
5202  * mpi3mr_probe - PCI probe callback
5203  * @pdev: PCI device instance
5204  * @id: PCI device ID details
5205  *
5206  * Controller initialization routine. Checks the security status
5207  * of the controller; if it is invalid or tampered, returns from
5208  * the probe without initializing the controller. Otherwise,
5209  * allocates the per adapter instance through shost_priv,
5210  * initializes controller specific data structures, initializes
5211  * the controller hardware, and adds the shost to the SCSI subsystem.
5212  *
5213  * Return: 0 on success, non-zero on failure.
5214  */
5215 
5216 static int
5217 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5218 {
5219 	struct mpi3mr_ioc *mrioc = NULL;
5220 	struct Scsi_Host *shost = NULL;
5221 	int retval = 0, i;
5222 
5223 	if (osintfc_mrioc_security_status(pdev)) {
5224 		warn_non_secure_ctlr = 1;
5225 		return 1; /* For Invalid and Tampered device */
5226 	}
5227 
5228 	shost = scsi_host_alloc(&mpi3mr_driver_template,
5229 	    sizeof(struct mpi3mr_ioc));
5230 	if (!shost) {
5231 		retval = -ENODEV;
5232 		goto shost_failed;
5233 	}
5234 
5235 	mrioc = shost_priv(shost);
5236 	retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL);
5237 	if (retval < 0)
5238 		goto id_alloc_failed;
5239 	mrioc->id = (u8)retval;
5240 	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
5241 	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
5242 	INIT_LIST_HEAD(&mrioc->list);
5243 	spin_lock(&mrioc_list_lock);
5244 	list_add_tail(&mrioc->list, &mrioc_list);
5245 	spin_unlock(&mrioc_list_lock);
5246 
5247 	spin_lock_init(&mrioc->admin_req_lock);
5248 	spin_lock_init(&mrioc->reply_free_queue_lock);
5249 	spin_lock_init(&mrioc->sbq_lock);
5250 	spin_lock_init(&mrioc->fwevt_lock);
5251 	spin_lock_init(&mrioc->tgtdev_lock);
5252 	spin_lock_init(&mrioc->watchdog_lock);
5253 	spin_lock_init(&mrioc->chain_buf_lock);
5254 	spin_lock_init(&mrioc->adm_req_q_bar_writeq_lock);
5255 	spin_lock_init(&mrioc->adm_reply_q_bar_writeq_lock);
5256 	spin_lock_init(&mrioc->sas_node_lock);
5257 	spin_lock_init(&mrioc->trigger_lock);
5258 
5259 	INIT_LIST_HEAD(&mrioc->fwevt_list);
5260 	INIT_LIST_HEAD(&mrioc->tgtdev_list);
5261 	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
5262 	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
5263 	INIT_LIST_HEAD(&mrioc->sas_expander_list);
5264 	INIT_LIST_HEAD(&mrioc->hba_port_table_list);
5265 	INIT_LIST_HEAD(&mrioc->enclosure_list);
5266 
5267 	mutex_init(&mrioc->reset_mutex);
5268 	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
5269 	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
5270 	mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
5271 	mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
5272 	mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
5273 	    MPI3MR_HOSTTAG_TRANSPORT_CMDS);
5274 
5275 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5276 		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
5277 		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
5278 
5279 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5280 		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
5281 				    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
5282 
5283 	if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
5284 		!pdev->revision)
5285 		mrioc->enable_segqueue = false;
5286 	else
5287 		mrioc->enable_segqueue = true;
5288 
5289 	init_waitqueue_head(&mrioc->reset_waitq);
5290 	mrioc->logging_level = logging_level;
5291 	mrioc->shost = shost;
5292 	mrioc->pdev = pdev;
5293 	mrioc->stop_bsgs = 1;
5294 
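	/*
	 * Clamp the module parameter and round it down to a multiple of
	 * MPI3MR_DEFAULT_SGL_ENTRIES (256): e.g. max_sgl_entries=1000
	 * becomes 768, while values outside [256, 2048] are clamped to
	 * the nearest bound.
	 */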
5295 	mrioc->max_sgl_entries = max_sgl_entries;
5296 	if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
5297 		mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
5298 	else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
5299 		mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
5300 	else {
5301 		mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
5302 		mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
5303 	}
5304 
5305 	/* init shost parameters */
5306 	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
5307 	shost->max_lun = -1;
5308 	shost->unique_id = mrioc->id;
5309 
5310 	shost->max_channel = 0;
5311 	shost->max_id = 0xFFFFFFFF;
5312 
5313 	shost->host_tagset = 1;
5314 
5315 	if (prot_mask >= 0)
5316 		scsi_host_set_prot(shost, prot_mask);
5317 	else {
5318 		prot_mask = SHOST_DIF_TYPE1_PROTECTION
5319 		    | SHOST_DIF_TYPE2_PROTECTION
5320 		    | SHOST_DIF_TYPE3_PROTECTION;
5321 		scsi_host_set_prot(shost, prot_mask);
5322 	}
5323 
5324 	ioc_info(mrioc,
5325 	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
5326 	    __func__,
5327 	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5328 	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5329 	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5330 	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5331 	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5332 	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5333 	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5334 
5335 	if (prot_guard_mask)
5336 		scsi_host_set_guard(shost, (prot_guard_mask & 3));
5337 	else
5338 		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
5339 
5340 	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
5341 		"%s%d_fwevt_wrkr", 0, mrioc->driver_name, mrioc->id);
5342 	if (!mrioc->fwevt_worker_thread) {
5343 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
5344 		    __FILE__, __LINE__, __func__);
5345 		retval = -ENODEV;
5346 		goto fwevtthread_failed;
5347 	}
5348 
5349 	mrioc->is_driver_loading = 1;
5350 	mrioc->cpu_count = num_online_cpus();
5351 	if (mpi3mr_setup_resources(mrioc)) {
5352 		ioc_err(mrioc, "setup resources failed\n");
5353 		retval = -ENODEV;
5354 		goto resource_alloc_failed;
5355 	}
5356 	if (mpi3mr_init_ioc(mrioc)) {
5357 		ioc_err(mrioc, "initializing IOC failed\n");
5358 		retval = -ENODEV;
5359 		goto init_ioc_failed;
5360 	}
5361 
5362 	shost->nr_hw_queues = mrioc->num_op_reply_q;
5363 	if (mrioc->active_poll_qcount)
5364 		shost->nr_maps = 3;
5365 
5366 	shost->can_queue = mrioc->max_host_ios;
5367 	shost->sg_tablesize = mrioc->max_sgl_entries;
5368 	shost->max_id = mrioc->facts.max_perids + 1;
5369 
5370 	retval = scsi_add_host(shost, &pdev->dev);
5371 	if (retval) {
5372 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
5373 		    __FILE__, __LINE__, __func__);
5374 		goto addhost_failed;
5375 	}
5376 
5377 	scsi_scan_host(shost);
5378 	mpi3mr_bsg_init(mrioc);
5379 	return retval;
5380 
5381 addhost_failed:
5382 	mpi3mr_stop_watchdog(mrioc);
5383 	mpi3mr_cleanup_ioc(mrioc);
5384 init_ioc_failed:
5385 	mpi3mr_free_mem(mrioc);
5386 	mpi3mr_cleanup_resources(mrioc);
5387 resource_alloc_failed:
5388 	destroy_workqueue(mrioc->fwevt_worker_thread);
5389 fwevtthread_failed:
5390 	ida_free(&mrioc_ida, mrioc->id);
5391 	spin_lock(&mrioc_list_lock);
5392 	list_del(&mrioc->list);
5393 	spin_unlock(&mrioc_list_lock);
5394 id_alloc_failed:
5395 	scsi_host_put(shost);
5396 shost_failed:
5397 	return retval;
5398 }
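
/*
 * Note: the error unwind above releases resources in the reverse order
 * of acquisition; the IDA id and mrioc_list entry are torn down on any
 * failure after allocation, and the final scsi_host_put() frees the
 * shost together with the embedded mpi3mr_ioc.
 */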
5399 
5400 /**
5401  * mpi3mr_remove - PCI remove callback
5402  * @pdev: PCI device instance
5403  *
5404  * Clean up the IOC by issuing MUR and shutdown notification.
5405  * Free all memory and resources associated with the controller
5406  * and target devices, and unregister the shost.
5407  *
5408  * Return: Nothing.
5409  */
5410 static void mpi3mr_remove(struct pci_dev *pdev)
5411 {
5412 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
5413 	struct mpi3mr_ioc *mrioc;
5414 	struct workqueue_struct	*wq;
5415 	unsigned long flags;
5416 	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
5417 	struct mpi3mr_hba_port *port, *hba_port_next;
5418 	struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
5419 
5420 	if (!shost)
5421 		return;
5422 
5423 	mrioc = shost_priv(shost);
5424 	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
5425 		ssleep(1);
5426 
5427 	if (mrioc->block_on_pci_err) {
5428 		mrioc->block_on_pci_err = false;
5429 		scsi_unblock_requests(shost);
5430 		mrioc->unrecoverable = 1;
5431 	}
5432 
5433 	if (!pci_device_is_present(mrioc->pdev) ||
5434 	    mrioc->pci_err_recovery) {
5435 		mrioc->unrecoverable = 1;
5436 		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
5437 	}
5438 
5439 	mpi3mr_bsg_exit(mrioc);
5440 	mrioc->stop_drv_processing = 1;
5441 	mpi3mr_cleanup_fwevt_list(mrioc);
5442 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
5443 	wq = mrioc->fwevt_worker_thread;
5444 	mrioc->fwevt_worker_thread = NULL;
5445 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
5446 	if (wq)
5447 		destroy_workqueue(wq);
5448 
5449 	if (mrioc->sas_transport_enabled)
5450 		sas_remove_host(shost);
5451 	else
5452 		scsi_remove_host(shost);
5453 
5454 	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
5455 	    list) {
5456 		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
5457 		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
5458 		mpi3mr_tgtdev_put(tgtdev);
5459 	}
5460 	mpi3mr_stop_watchdog(mrioc);
5461 	mpi3mr_cleanup_ioc(mrioc);
5462 	mpi3mr_free_mem(mrioc);
5463 	mpi3mr_cleanup_resources(mrioc);
5464 
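	/*
	 * The expander teardown below drops sas_node_lock around each
	 * mpi3mr_expander_node_remove() call (removal may sleep), and
	 * walks the list in reverse, presumably so that the most
	 * downstream expanders are removed before the ones they are
	 * attached through.
	 */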
5465 	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
5466 	list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
5467 	    &mrioc->sas_expander_list, list) {
5468 		spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
5469 		mpi3mr_expander_node_remove(mrioc, sas_expander);
5470 		spin_lock_irqsave(&mrioc->sas_node_lock, flags);
5471 	}
5472 	list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
5473 		ioc_info(mrioc,
5474 		    "removing hba_port entry: %p port: %d from hba_port list\n",
5475 		    port, port->port_id);
5476 		list_del(&port->list);
5477 		kfree(port);
5478 	}
5479 	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
5480 
5481 	if (mrioc->sas_hba.num_phys) {
5482 		kfree(mrioc->sas_hba.phy);
5483 		mrioc->sas_hba.phy = NULL;
5484 		mrioc->sas_hba.num_phys = 0;
5485 	}
5486 
5487 	ida_free(&mrioc_ida, mrioc->id);
5488 	spin_lock(&mrioc_list_lock);
5489 	list_del(&mrioc->list);
5490 	spin_unlock(&mrioc_list_lock);
5491 
5492 	scsi_host_put(shost);
5493 }
5494 
5495 /**
5496  * mpi3mr_shutdown - PCI shutdown callback
5497  * @pdev: PCI device instance
5498  *
5499  * Free all memory and resources associated with the
5500  * controller.
5501  *
5502  * Return: Nothing.
5503  */
5504 static void mpi3mr_shutdown(struct pci_dev *pdev)
5505 {
5506 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
5507 	struct mpi3mr_ioc *mrioc;
5508 	struct workqueue_struct	*wq;
5509 	unsigned long flags;
5510 
5511 	if (!shost)
5512 		return;
5513 
5514 	mrioc = shost_priv(shost);
5515 	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
5516 		ssleep(1);
5517 
5518 	mrioc->stop_drv_processing = 1;
5519 	mpi3mr_cleanup_fwevt_list(mrioc);
5520 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
5521 	wq = mrioc->fwevt_worker_thread;
5522 	mrioc->fwevt_worker_thread = NULL;
5523 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
5524 	if (wq)
5525 		destroy_workqueue(wq);
5526 
5527 	mpi3mr_stop_watchdog(mrioc);
5528 	mpi3mr_cleanup_ioc(mrioc);
5529 	mpi3mr_cleanup_resources(mrioc);
5530 }
5531 
5532 /**
5533  * mpi3mr_suspend - PCI power management suspend callback
5534  * @dev: Device struct
5535  *
5536  * Change the power state to the given value and clean up the IOC
5537  * by issuing MUR and shutdown notification.
5538  *
5539  * Return: 0 always.
5540  */
5541 static int __maybe_unused
5542 mpi3mr_suspend(struct device *dev)
5543 {
5544 	struct pci_dev *pdev = to_pci_dev(dev);
5545 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
5546 	struct mpi3mr_ioc *mrioc;
5547 
5548 	if (!shost)
5549 		return 0;
5550 
5551 	mrioc = shost_priv(shost);
5552 	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
5553 		ssleep(1);
5554 	mrioc->stop_drv_processing = 1;
5555 	mpi3mr_cleanup_fwevt_list(mrioc);
5556 	scsi_block_requests(shost);
5557 	mpi3mr_stop_watchdog(mrioc);
5558 	mpi3mr_cleanup_ioc(mrioc);
5559 
5560 	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
5561 	    pdev, pci_name(pdev));
5562 	mpi3mr_cleanup_resources(mrioc);
5563 
5564 	return 0;
5565 }
5566 
5567 /**
5568  * mpi3mr_resume - PCI power management resume callback
5569  * @dev: Device struct
5570  *
5571  * Restore the power state to D0, reinitialize the controller,
5572  * and resume I/O operations to the target devices.
5573  *
5574  * Return: 0 on success, non-zero on failure
5575  */
5576 static int __maybe_unused
5577 mpi3mr_resume(struct device *dev)
5578 {
5579 	struct pci_dev *pdev = to_pci_dev(dev);
5580 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
5581 	struct mpi3mr_ioc *mrioc;
5582 	pci_power_t device_state = pdev->current_state;
5583 	int r;
5584 
5585 	if (!shost)
5586 		return 0;
5587 
5588 	mrioc = shost_priv(shost);
5589 
5590 	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
5591 	    pdev, pci_name(pdev), device_state);
5592 	mrioc->pdev = pdev;
5593 	mrioc->cpu_count = num_online_cpus();
5594 	r = mpi3mr_setup_resources(mrioc);
5595 	if (r) {
5596 		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
5597 		    __func__, r);
5598 		return r;
5599 	}
5600 
5601 	mrioc->stop_drv_processing = 0;
5602 	mpi3mr_invalidate_devhandles(mrioc);
5603 	mpi3mr_free_enclosure_list(mrioc);
5604 	mpi3mr_memset_buffers(mrioc);
5605 	r = mpi3mr_reinit_ioc(mrioc, 1);
5606 	if (r) {
5607 		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
5608 		return r;
5609 	}
5610 	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
5611 	scsi_unblock_requests(shost);
5612 	mrioc->device_refresh_on = 0;
5613 	mpi3mr_start_watchdog(mrioc);
5614 
5615 	return 0;
5616 }
5617 
5618 /**
5619  * mpi3mr_pcierr_error_detected - PCI error detected callback
5620  * @pdev: PCI device instance
5621  * @state: channel state
5622  *
5623  * This function is called by the PCI error recovery driver and,
5624  * based on the state passed in, decides what action to
5625  * recommend back to the PCI core.
5626  *
5627  * For all of the states if there is no valid mrioc or scsi host
5628  * references in the PCI device then this function will return
5629  * the result as disconnect.
5630  *
5631  * For normal state, this function will return the result as can
5632  * recover.
5633  *
5634  * For frozen state, this function will block for any pending
5635  * controller initialization or re-initialization to complete,
5636  * stop any new interactions with the controller and return
5637  * status as reset required.
5638  *
5639  * For permanent failure state, this function will mark the
5640  * controller as unrecoverable and return status as disconnect.
5641  *
5642  * Returns: PCI_ERS_RESULT_NEED_RESET or CAN_RECOVER or
5643  * DISCONNECT based on the controller state.
5644  */
5645 static pci_ers_result_t
5646 mpi3mr_pcierr_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5647 {
5648 	struct Scsi_Host *shost;
5649 	struct mpi3mr_ioc *mrioc;
5650 	unsigned int timeout = MPI3MR_RESET_TIMEOUT;
5651 
5652 	dev_info(&pdev->dev, "%s: callback invoked state(%d)\n", __func__,
5653 	    state);
5654 
5655 	shost = pci_get_drvdata(pdev);
5656 	mrioc = shost_priv(shost);
5657 
5658 	switch (state) {
5659 	case pci_channel_io_normal:
5660 		return PCI_ERS_RESULT_CAN_RECOVER;
5661 	case pci_channel_io_frozen:
5662 		mrioc->pci_err_recovery = true;
5663 		mrioc->block_on_pci_err = true;
5664 		do {
5665 			if (mrioc->reset_in_progress || mrioc->is_driver_loading)
5666 				ssleep(1);
5667 			else
5668 				break;
5669 		} while (--timeout);
5670 
5671 		if (!timeout) {
5672 			mrioc->pci_err_recovery = true;
5673 			mrioc->block_on_pci_err = true;
5674 			mrioc->unrecoverable = 1;
5675 			mpi3mr_stop_watchdog(mrioc);
5676 			mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
5677 			return PCI_ERS_RESULT_DISCONNECT;
5678 		}
5679 
5680 		scsi_block_requests(mrioc->shost);
5681 		mpi3mr_stop_watchdog(mrioc);
5682 		mpi3mr_cleanup_resources(mrioc);
5683 		return PCI_ERS_RESULT_NEED_RESET;
5684 	case pci_channel_io_perm_failure:
5685 		mrioc->pci_err_recovery = true;
5686 		mrioc->block_on_pci_err = true;
5687 		mrioc->unrecoverable = 1;
5688 		mpi3mr_stop_watchdog(mrioc);
5689 		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
5690 		return PCI_ERS_RESULT_DISCONNECT;
5691 	default:
5692 		return PCI_ERS_RESULT_DISCONNECT;
5693 	}
5694 }
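
/*
 * Recovery decision summary (mirrors the switch above):
 *
 *	pci_channel_io_normal		-> PCI_ERS_RESULT_CAN_RECOVER
 *	pci_channel_io_frozen		-> PCI_ERS_RESULT_NEED_RESET, or
 *					   DISCONNECT if a pending reset
 *					   never completes in time
 *	pci_channel_io_perm_failure	-> PCI_ERS_RESULT_DISCONNECT
 */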
5695 
5696 /**
5697  * mpi3mr_pcierr_slot_reset - Post slot reset callback
5698  * @pdev: PCI device instance
5699  *
5700  * This function is called by the PCI error recovery driver
5701  * after a slot or link reset issued by it for the recovery, the
5702  * driver is expected to bring back the controller and
5703  * initialize it.
5704  *
5705  * This function restores PCI state and reinitializes controller
5706  * resources and the controller, this blocks for any pending
5707  * reset to complete.
5708  *
5709  * Returns: PCI_ERS_RESULT_DISCONNECT on failure or
5710  * PCI_ERS_RESULT_RECOVERED
5711  */
5712 static pci_ers_result_t mpi3mr_pcierr_slot_reset(struct pci_dev *pdev)
5713 {
5714 	struct Scsi_Host *shost;
5715 	struct mpi3mr_ioc *mrioc;
5716 	unsigned int timeout = MPI3MR_RESET_TIMEOUT;
5717 
5718 	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);
5719 
5720 	shost = pci_get_drvdata(pdev);
5721 	mrioc = shost_priv(shost);
5722 
5723 	do {
5724 		if (mrioc->reset_in_progress)
5725 			ssleep(1);
5726 		else
5727 			break;
5728 	} while (--timeout);
5729 
5730 	if (!timeout)
5731 		goto out_failed;
5732 
5733 	pci_restore_state(pdev);
5734 
5735 	if (mpi3mr_setup_resources(mrioc)) {
5736 		ioc_err(mrioc, "setup resources failed\n");
5737 		goto out_failed;
5738 	}
5739 	mrioc->unrecoverable = 0;
5740 	mrioc->pci_err_recovery = false;
5741 
5742 	if (mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0))
5743 		goto out_failed;
5744 
5745 	return PCI_ERS_RESULT_RECOVERED;
5746 
5747 out_failed:
5748 	mrioc->unrecoverable = 1;
5749 	mrioc->block_on_pci_err = false;
5750 	scsi_unblock_requests(shost);
5751 	mpi3mr_start_watchdog(mrioc);
5752 	return PCI_ERS_RESULT_DISCONNECT;
5753 }
5754 
5755 /**
5756  * mpi3mr_pcierr_resume - PCI error recovery resume
5757  * callback
5758  * @pdev: PCI device instance
5759  *
5760  * This function re-enables all I/O and IOCTLs after the reset
5761  * issued as part of the PCI error recovery.
5762  *
5763  * Return: Nothing.
5764  */
5765 static void mpi3mr_pcierr_resume(struct pci_dev *pdev)
5766 {
5767 	struct Scsi_Host *shost;
5768 	struct mpi3mr_ioc *mrioc;
5769 
5770 	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);
5771 
5772 	shost = pci_get_drvdata(pdev);
5773 	mrioc = shost_priv(shost);
5774 
5775 	if (mrioc->block_on_pci_err) {
5776 		mrioc->block_on_pci_err = false;
5777 		scsi_unblock_requests(shost);
5778 		mpi3mr_start_watchdog(mrioc);
5779 	}
5780 }
5781 
5782 /**
5783  * mpi3mr_pcierr_mmio_enabled - PCI error recovery callback
5784  * @pdev: PCI device instance
5785  *
5786  * This is called only if mpi3mr_pcierr_error_detected returns
5787  * PCI_ERS_RESULT_CAN_RECOVER.
5788  *
5789  * Return: PCI_ERS_RESULT_DISCONNECT when the controller is
5790  * unrecoverable or when the shost/mrioc reference cannot be
5791  * found, else return PCI_ERS_RESULT_RECOVERED
5792  */
5793 static pci_ers_result_t mpi3mr_pcierr_mmio_enabled(struct pci_dev *pdev)
5794 {
5795 	struct Scsi_Host *shost;
5796 	struct mpi3mr_ioc *mrioc;
5797 
5798 	dev_info(&pdev->dev, "%s: callback invoked\n", __func__);
5799 
5800 	shost = pci_get_drvdata(pdev);
5801 	mrioc = shost_priv(shost);
5802 
5803 	if (mrioc->unrecoverable)
5804 		return PCI_ERS_RESULT_DISCONNECT;
5805 
5806 	return PCI_ERS_RESULT_RECOVERED;
5807 }
5808 
5809 static const struct pci_device_id mpi3mr_pci_id_table[] = {
5810 	{
5811 		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
5812 		    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
5813 	},
5814 	{
5815 		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
5816 		    MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID)
5817 	},
5818 	{
5819 		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
5820 		    MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID)
5821 	},
5822 	{ 0 }
5823 };
5824 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
5825 
5826 static struct pci_error_handlers mpi3mr_err_handler = {
5827 	.error_detected = mpi3mr_pcierr_error_detected,
5828 	.mmio_enabled = mpi3mr_pcierr_mmio_enabled,
5829 	.slot_reset = mpi3mr_pcierr_slot_reset,
5830 	.resume = mpi3mr_pcierr_resume,
5831 };
5832 
5833 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);
5834 
5835 static struct pci_driver mpi3mr_pci_driver = {
5836 	.name = MPI3MR_DRIVER_NAME,
5837 	.id_table = mpi3mr_pci_id_table,
5838 	.probe = mpi3mr_probe,
5839 	.remove = mpi3mr_remove,
5840 	.shutdown = mpi3mr_shutdown,
5841 	.err_handler = &mpi3mr_err_handler,
5842 	.driver.pm = &mpi3mr_pm_ops,
5843 };
5844 
5845 static ssize_t event_counter_show(struct device_driver *dd, char *buf)
5846 {
5847 	return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
5848 }
5849 static DRIVER_ATTR_RO(event_counter);
5850 
5851 static int __init mpi3mr_init(void)
5852 {
5853 	int ret_val;
5854 
5855 	pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
5856 	    MPI3MR_DRIVER_VERSION);
5857 
5858 	mpi3mr_transport_template =
5859 	    sas_attach_transport(&mpi3mr_transport_functions);
5860 	if (!mpi3mr_transport_template) {
5861 		pr_err("%s failed to load due to sas transport attach failure\n",
5862 		    MPI3MR_DRIVER_NAME);
5863 		return -ENODEV;
5864 	}
5865 
5866 	ret_val = pci_register_driver(&mpi3mr_pci_driver);
5867 	if (ret_val) {
5868 		pr_err("%s failed to load due to pci register driver failure\n",
5869 		    MPI3MR_DRIVER_NAME);
5870 		goto err_pci_reg_fail;
5871 	}
5872 
5873 	ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
5874 				     &driver_attr_event_counter);
5875 	if (ret_val)
5876 		goto err_event_counter;
5877 
5878 	return ret_val;
5879 
5880 err_event_counter:
5881 	pci_unregister_driver(&mpi3mr_pci_driver);
5882 
5883 err_pci_reg_fail:
5884 	sas_release_transport(mpi3mr_transport_template);
5885 	return ret_val;
5886 }
5887 
5888 static void __exit mpi3mr_exit(void)
5889 {
5890 	if (warn_non_secure_ctlr)
5891 		pr_warn(
5892 		    "Unloading %s version %s while managing a non secure controller\n",
5893 		    MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
5894 	else
5895 		pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
5896 		    MPI3MR_DRIVER_VERSION);
5897 
5898 	driver_remove_file(&mpi3mr_pci_driver.driver,
5899 			   &driver_attr_event_counter);
5900 	pci_unregister_driver(&mpi3mr_pci_driver);
5901 	sas_release_transport(mpi3mr_transport_template);
5902 	ida_destroy(&mrioc_ida);
5903 }
5904 
5905 module_init(mpi3mr_init);
5906 module_exit(mpi3mr_exit);
5907