1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Linux MegaRAID driver for SAS based RAID controllers
4  *
5  *  Copyright (c) 2003-2013  LSI Corporation
6  *  Copyright (c) 2013-2016  Avago Technologies
7  *  Copyright (c) 2016-2018  Broadcom Inc.
8  *
9  *  Authors: Broadcom Inc.
10  *           Sreenivas Bagalkote
11  *           Sumant Patro
12  *           Bo Yang
13  *           Adam Radford
14  *           Kashyap Desai <kashyap.desai@broadcom.com>
15  *           Sumit Saxena <sumit.saxena@broadcom.com>
16  *
17  *  Send feedback to: megaraidlinux.pdl@broadcom.com
18  */
19 
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/uio.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <asm/unaligned.h>
33 #include <linux/fs.h>
34 #include <linux/compat.h>
35 #include <linux/blkdev.h>
36 #include <linux/mutex.h>
37 #include <linux/poll.h>
38 #include <linux/vmalloc.h>
39 #include <linux/irq_poll.h>
40 #include <linux/blk-mq-pci.h>
41 
42 #include <scsi/scsi.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_device.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_tcq.h>
47 #include <scsi/scsi_dbg.h>
48 #include "megaraid_sas_fusion.h"
49 #include "megaraid_sas.h"
50 
51 /*
52  * Number of sectors per IO command
53  * Will be set in megasas_init_mfi if user does not provide
54  */
55 static unsigned int max_sectors;
56 module_param_named(max_sectors, max_sectors, int, 0444);
57 MODULE_PARM_DESC(max_sectors,
58 	"Maximum number of sectors per IO command");
59 
60 static int msix_disable;
61 module_param(msix_disable, int, 0444);
62 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
63 
64 static unsigned int msix_vectors;
65 module_param(msix_vectors, int, 0444);
66 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
67 
68 static int allow_vf_ioctls;
69 module_param(allow_vf_ioctls, int, 0444);
70 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
71 
72 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
73 module_param(throttlequeuedepth, int, 0444);
74 MODULE_PARM_DESC(throttlequeuedepth,
75 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
76 
77 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
78 module_param(resetwaittime, int, 0444);
79 MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
80 
81 static int smp_affinity_enable = 1;
82 module_param(smp_affinity_enable, int, 0444);
83 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
84 
85 static int rdpq_enable = 1;
86 module_param(rdpq_enable, int, 0444);
87 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
88 
89 unsigned int dual_qdepth_disable;
90 module_param(dual_qdepth_disable, int, 0444);
91 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
92 
93 static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
94 module_param(scmd_timeout, int, 0444);
95 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
96 
97 int perf_mode = -1;
98 module_param(perf_mode, int, 0444);
99 MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
100 		"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
101 		"interrupt coalescing is enabled only on high iops queues\n\t\t"
102 		"1 - iops: High iops queues are not allocated &\n\t\t"
103 		"interrupt coalescing is enabled on all queues\n\t\t"
104 		"2 - latency: High iops queues are not allocated &\n\t\t"
105 		"interrupt coalescing is disabled on all queues\n\t\t"
106 		"default mode is 'balanced'"
107 		);
108 
109 int event_log_level = MFI_EVT_CLASS_CRITICAL;
110 module_param(event_log_level, int, 0644);
111 MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
112 
113 unsigned int enable_sdev_max_qd;
114 module_param(enable_sdev_max_qd, int, 0444);
115 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
116 
117 int host_tagset_enable = 1;
118 module_param(host_tagset_enable, int, 0444);
119 MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
120 
121 MODULE_LICENSE("GPL");
122 MODULE_VERSION(MEGASAS_VERSION);
123 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
124 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
125 
126 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
127 static int megasas_get_pd_list(struct megasas_instance *instance);
128 static int megasas_ld_list_query(struct megasas_instance *instance,
129 				 u8 query_type);
130 static int megasas_issue_init_mfi(struct megasas_instance *instance);
131 static int megasas_register_aen(struct megasas_instance *instance,
132 				u32 seq_num, u32 class_locale_word);
133 static void megasas_get_pd_info(struct megasas_instance *instance,
134 				struct scsi_device *sdev);
135 static void
136 megasas_set_ld_removed_by_fw(struct megasas_instance *instance);
137 
138 /*
139  * PCI ID table for all supported controllers
140  */
141 static struct pci_device_id megasas_pci_table[] = {
142 
143 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
144 	/* xscale IOP */
145 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
146 	/* ppc IOP */
147 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
148 	/* ppc IOP */
149 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
150 	/* gen2*/
151 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
152 	/* gen2*/
153 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
154 	/* skinny*/
155 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
156 	/* skinny*/
157 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
158 	/* xscale IOP, vega */
159 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
160 	/* xscale IOP */
161 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
162 	/* Fusion */
163 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
164 	/* Plasma */
165 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
166 	/* Invader */
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
168 	/* Fury */
169 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
170 	/* Intruder */
171 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
172 	/* Intruder 24 port*/
173 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
174 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
175 	/* VENTURA */
176 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
177 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
178 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
179 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
180 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
181 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
182 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
183 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
184 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
185 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
186 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
187 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
188 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
189 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
190 	{}
191 };
192 
193 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
194 
195 static int megasas_mgmt_majorno;
196 struct megasas_mgmt_info megasas_mgmt_info;
197 static struct fasync_struct *megasas_async_queue;
198 static DEFINE_MUTEX(megasas_async_queue_mutex);
199 
200 static int megasas_poll_wait_aen;
201 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
202 static u32 support_poll_for_event;
203 u32 megasas_dbg_lvl;
204 static u32 support_device_change;
205 static bool support_nvme_encapsulation;
206 static bool support_pci_lane_margining;
207 
208 /* define lock for aen poll */
209 static spinlock_t poll_aen_lock;
210 
211 extern struct dentry *megasas_debugfs_root;
212 
213 void
214 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
215 		     u8 alt_status);
216 static u32
217 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
218 static int
219 megasas_adp_reset_gen2(struct megasas_instance *instance,
220 		       struct megasas_register_set __iomem *reg_set);
221 static irqreturn_t megasas_isr(int irq, void *devp);
222 static u32
223 megasas_init_adapter_mfi(struct megasas_instance *instance);
224 u32
225 megasas_build_and_issue_cmd(struct megasas_instance *instance,
226 			    struct scsi_cmnd *scmd);
227 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
228 int
229 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
230 	int seconds);
231 void megasas_fusion_ocr_wq(struct work_struct *work);
232 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
233 					 int initial);
234 static int
235 megasas_set_dma_mask(struct megasas_instance *instance);
236 static int
237 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
238 static inline void
239 megasas_free_ctrl_mem(struct megasas_instance *instance);
240 static inline int
241 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
242 static inline void
243 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
244 static inline void
245 megasas_init_ctrl_params(struct megasas_instance *instance);
246 
247 u32 megasas_readl(struct megasas_instance *instance,
248 		  const volatile void __iomem *addr)
249 {
250 	u32 i = 0, ret_val;
251 	/*
252 	 * Due to a HW errata in Aero controllers, reads to certain
253 	 * Fusion registers could intermittently return all zeroes.
254 	 * This behavior is transient in nature and subsequent reads will
255 	 * return a valid value. As a workaround, the driver retries the readl
256 	 * up to three times until a non-zero value is read.
257 	 */
258 	if (instance->adapter_type == AERO_SERIES) {
259 		do {
260 			ret_val = readl(addr);
261 			i++;
262 		} while (ret_val == 0 && i < 3);
263 		return ret_val;
264 	} else {
265 		return readl(addr);
266 	}
267 }
268 
269 /**
270  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
271  * @instance:			Adapter soft state
272  * @dcmd:			DCMD frame inside MFI command
273  * @dma_addr:			DMA address of buffer to be passed to FW
274  * @dma_len:			Length of DMA buffer to be passed to FW
275  * @return:			void
276  */
277 void megasas_set_dma_settings(struct megasas_instance *instance,
278 			      struct megasas_dcmd_frame *dcmd,
279 			      dma_addr_t dma_addr, u32 dma_len)
280 {
281 	if (instance->consistent_mask_64bit) {
282 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
283 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
284 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
285 
286 	} else {
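		/*
		 * With a 32-bit consistent DMA mask only the low 32 bits of
		 * the address are programmed into a 32-bit SGE and the
		 * MFI_FRAME_SGL64 flag is left clear.
		 */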
287 		dcmd->sgl.sge32[0].phys_addr =
288 				cpu_to_le32(lower_32_bits(dma_addr));
289 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
290 		dcmd->flags = cpu_to_le16(dcmd->flags);
291 	}
292 }
293 
294 static void
295 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
296 {
297 	instance->instancet->fire_cmd(instance,
298 		cmd->frame_phys_addr, 0, instance->reg_set);
299 	return;
300 }
301 
302 /**
303  * megasas_get_cmd -	Get a command from the free pool
304  * @instance:		Adapter soft state
305  *
306  * Returns a free command from the pool
307  */
308 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
309 						  *instance)
310 {
311 	unsigned long flags;
312 	struct megasas_cmd *cmd = NULL;
313 
314 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
315 
316 	if (!list_empty(&instance->cmd_pool)) {
317 		cmd = list_entry((&instance->cmd_pool)->next,
318 				 struct megasas_cmd, list);
319 		list_del_init(&cmd->list);
320 	} else {
321 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
322 	}
323 
324 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
325 	return cmd;
326 }
327 
328 /**
329  * megasas_return_cmd -	Return a cmd to free command pool
330  * @instance:		Adapter soft state
331  * @cmd:		Command packet to be returned to free command pool
332  */
333 void
334 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
335 {
336 	unsigned long flags;
337 	u32 blk_tags;
338 	struct megasas_cmd_fusion *cmd_fusion;
339 	struct fusion_context *fusion = instance->ctrl_context;
340 
341 	/* This flag is used only for fusion adapter.
342 	 * Wait for Interrupt for Polled mode DCMD
343 	 */
344 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
345 		return;
346 
347 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
348 
349 	if (fusion) {
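		/*
		 * For fusion adapters the MFI frames sit after the first
		 * max_scsi_cmds entries of the fusion command list, so look up
		 * the matching fusion command at that offset and return it too.
		 */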
350 		blk_tags = instance->max_scsi_cmds + cmd->index;
351 		cmd_fusion = fusion->cmd_list[blk_tags];
352 		megasas_return_cmd_fusion(instance, cmd_fusion);
353 	}
354 	cmd->scmd = NULL;
355 	cmd->frame_count = 0;
356 	cmd->flags = 0;
357 	memset(cmd->frame, 0, instance->mfi_frame_size);
358 	cmd->frame->io.context = cpu_to_le32(cmd->index);
359 	if (!fusion && reset_devices)
360 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
361 	list_add(&cmd->list, (&instance->cmd_pool)->next);
362 
363 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
364 
365 }
366 
367 static const char *
368 format_timestamp(uint32_t timestamp)
369 {
370 	static char buffer[32];
371 
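	/*
	 * A timestamp whose top byte is 0xff is relative to controller boot;
	 * the low 24 bits carry the offset in seconds.
	 */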
372 	if ((timestamp & 0xff000000) == 0xff000000)
373 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
374 		0x00ffffff);
375 	else
376 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
377 	return buffer;
378 }
379 
380 static const char *
381 format_class(int8_t class)
382 {
383 	static char buffer[6];
384 
385 	switch (class) {
386 	case MFI_EVT_CLASS_DEBUG:
387 		return "debug";
388 	case MFI_EVT_CLASS_PROGRESS:
389 		return "progress";
390 	case MFI_EVT_CLASS_INFO:
391 		return "info";
392 	case MFI_EVT_CLASS_WARNING:
393 		return "WARN";
394 	case MFI_EVT_CLASS_CRITICAL:
395 		return "CRIT";
396 	case MFI_EVT_CLASS_FATAL:
397 		return "FATAL";
398 	case MFI_EVT_CLASS_DEAD:
399 		return "DEAD";
400 	default:
401 		snprintf(buffer, sizeof(buffer), "%d", class);
402 		return buffer;
403 	}
404 }
405 
406 /**
407   * megasas_decode_evt: Decode FW AEN event and print critical event
408   * for information.
409   * @instance:			Adapter soft state
410   */
411 static void
412 megasas_decode_evt(struct megasas_instance *instance)
413 {
414 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
415 	union megasas_evt_class_locale class_locale;
416 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
417 
418 	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
419 	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
420 		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
421 		event_log_level = MFI_EVT_CLASS_CRITICAL;
422 	}
423 
424 	if (class_locale.members.class >= event_log_level)
425 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
426 			le32_to_cpu(evt_detail->seq_num),
427 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
428 			(class_locale.members.locale),
429 			format_class(class_locale.members.class),
430 			evt_detail->description);
431 
432 	if (megasas_dbg_lvl & LD_PD_DEBUG)
433 		dev_info(&instance->pdev->dev,
434 			 "evt_detail.args.ld.target_id/index %d/%d\n",
435 			 evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);
436 
437 }
438 
439 /*
440  * The following functions are defined for xscale
441  * (deviceid : 1064R, PERC5) controllers
442  */
443 
444 /**
445  * megasas_enable_intr_xscale -	Enables interrupts
446  * @instance:	Adapter soft state
447  */
448 static inline void
449 megasas_enable_intr_xscale(struct megasas_instance *instance)
450 {
451 	struct megasas_register_set __iomem *regs;
452 
453 	regs = instance->reg_set;
454 	writel(0, &(regs)->outbound_intr_mask);
455 
456 	/* Dummy readl to force pci flush */
457 	readl(&regs->outbound_intr_mask);
458 }
459 
460 /**
461  * megasas_disable_intr_xscale -Disables interrupt
462  * @instance:	Adapter soft state
463  */
464 static inline void
465 megasas_disable_intr_xscale(struct megasas_instance *instance)
466 {
467 	struct megasas_register_set __iomem *regs;
468 	u32 mask = 0x1f;
469 
470 	regs = instance->reg_set;
471 	writel(mask, &regs->outbound_intr_mask);
472 	/* Dummy readl to force pci flush */
473 	readl(&regs->outbound_intr_mask);
474 }
475 
476 /**
477  * megasas_read_fw_status_reg_xscale - returns the current FW status value
478  * @instance:	Adapter soft state
479  */
480 static u32
481 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
482 {
483 	return readl(&instance->reg_set->outbound_msg_0);
484 }
485 /**
486  * megasas_clear_intr_xscale -	Check & clear interrupt
487  * @instance:	Adapter soft state
488  */
489 static int
490 megasas_clear_intr_xscale(struct megasas_instance *instance)
491 {
492 	u32 status;
493 	u32 mfiStatus = 0;
494 	struct megasas_register_set __iomem *regs;
495 	regs = instance->reg_set;
496 
497 	/*
498 	 * Check if it is our interrupt
499 	 */
500 	status = readl(&regs->outbound_intr_status);
501 
502 	if (status & MFI_OB_INTR_STATUS_MASK)
503 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
504 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
505 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
506 
507 	/*
508 	 * Clear the interrupt by writing back the same value
509 	 */
510 	if (mfiStatus)
511 		writel(status, &regs->outbound_intr_status);
512 
513 	/* Dummy readl to force pci flush */
514 	readl(&regs->outbound_intr_status);
515 
516 	return mfiStatus;
517 }
518 
519 /**
520  * megasas_fire_cmd_xscale -	Sends command to the FW
521  * @instance:		Adapter soft state
522  * @frame_phys_addr :	Physical address of cmd
523  * @frame_count :	Number of frames for the command
524  * @regs :		MFI register set
525  */
526 static inline void
527 megasas_fire_cmd_xscale(struct megasas_instance *instance,
528 		dma_addr_t frame_phys_addr,
529 		u32 frame_count,
530 		struct megasas_register_set __iomem *regs)
531 {
532 	unsigned long flags;
533 
534 	spin_lock_irqsave(&instance->hba_lock, flags);
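	/*
	 * Post the frame: the physical frame address is shifted right by
	 * three bits and the frame count is OR-ed into the resulting low
	 * bits before being written to the inbound queue port.
	 */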
535 	writel((frame_phys_addr >> 3)|(frame_count),
536 	       &(regs)->inbound_queue_port);
537 	spin_unlock_irqrestore(&instance->hba_lock, flags);
538 }
539 
540 /**
541  * megasas_adp_reset_xscale -  For controller reset
542  * @instance:	Adapter soft state
543  * @regs:	MFI register set
544  */
545 static int
546 megasas_adp_reset_xscale(struct megasas_instance *instance,
547 	struct megasas_register_set __iomem *regs)
548 {
549 	u32 i;
550 	u32 pcidata;
551 
552 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
553 
554 	for (i = 0; i < 3; i++)
555 		msleep(1000); /* sleep for 3 secs */
556 	pcidata  = 0;
557 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
558 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
559 	if (pcidata & 0x2) {
560 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
561 		pcidata &= ~0x2;
562 		pci_write_config_dword(instance->pdev,
563 				MFI_1068_PCSR_OFFSET, pcidata);
564 
565 		for (i = 0; i < 2; i++)
566 			msleep(1000); /* need to wait 2 secs again */
567 
568 		pcidata  = 0;
569 		pci_read_config_dword(instance->pdev,
570 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
571 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
572 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
573 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
574 			pcidata = 0;
575 			pci_write_config_dword(instance->pdev,
576 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
577 		}
578 	}
579 	return 0;
580 }
581 
582 /**
583  * megasas_check_reset_xscale -	For controller reset check
584  * @instance:	Adapter soft state
585  * @regs:	MFI register set
586  */
587 static int
588 megasas_check_reset_xscale(struct megasas_instance *instance,
589 		struct megasas_register_set __iomem *regs)
590 {
591 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
592 	    (le32_to_cpu(*instance->consumer) ==
593 		MEGASAS_ADPRESET_INPROG_SIGN))
594 		return 1;
595 	return 0;
596 }
597 
598 static struct megasas_instance_template megasas_instance_template_xscale = {
599 
600 	.fire_cmd = megasas_fire_cmd_xscale,
601 	.enable_intr = megasas_enable_intr_xscale,
602 	.disable_intr = megasas_disable_intr_xscale,
603 	.clear_intr = megasas_clear_intr_xscale,
604 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
605 	.adp_reset = megasas_adp_reset_xscale,
606 	.check_reset = megasas_check_reset_xscale,
607 	.service_isr = megasas_isr,
608 	.tasklet = megasas_complete_cmd_dpc,
609 	.init_adapter = megasas_init_adapter_mfi,
610 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
611 	.issue_dcmd = megasas_issue_dcmd,
612 };
613 
614 /*
615  * This is the end of set of functions & definitions specific
616  * to xscale (deviceid : 1064R, PERC5) controllers
617  */
618 
619 /*
620  * The following functions are defined for ppc (deviceid : 0x60)
621  * controllers
622  */
623 
624 /**
625  * megasas_enable_intr_ppc -	Enables interrupts
626  * @instance:	Adapter soft state
627  */
628 static inline void
629 megasas_enable_intr_ppc(struct megasas_instance *instance)
630 {
631 	struct megasas_register_set __iomem *regs;
632 
633 	regs = instance->reg_set;
634 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
635 
636 	writel(~0x80000000, &(regs)->outbound_intr_mask);
637 
638 	/* Dummy readl to force pci flush */
639 	readl(&regs->outbound_intr_mask);
640 }
641 
642 /**
643  * megasas_disable_intr_ppc -	Disable interrupt
644  * @instance:	Adapter soft state
645  */
646 static inline void
647 megasas_disable_intr_ppc(struct megasas_instance *instance)
648 {
649 	struct megasas_register_set __iomem *regs;
650 	u32 mask = 0xFFFFFFFF;
651 
652 	regs = instance->reg_set;
653 	writel(mask, &regs->outbound_intr_mask);
654 	/* Dummy readl to force pci flush */
655 	readl(&regs->outbound_intr_mask);
656 }
657 
658 /**
659  * megasas_read_fw_status_reg_ppc - returns the current FW status value
660  * @instance:	Adapter soft state
661  */
662 static u32
663 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
664 {
665 	return readl(&instance->reg_set->outbound_scratch_pad_0);
666 }
667 
668 /**
669  * megasas_clear_intr_ppc -	Check & clear interrupt
670  * @instance:	Adapter soft state
671  */
672 static int
673 megasas_clear_intr_ppc(struct megasas_instance *instance)
674 {
675 	u32 status, mfiStatus = 0;
676 	struct megasas_register_set __iomem *regs;
677 	regs = instance->reg_set;
678 
679 	/*
680 	 * Check if it is our interrupt
681 	 */
682 	status = readl(&regs->outbound_intr_status);
683 
684 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
685 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
686 
687 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
688 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
689 
690 	/*
691 	 * Clear the interrupt by writing back the same value
692 	 */
693 	writel(status, &regs->outbound_doorbell_clear);
694 
695 	/* Dummy readl to force pci flush */
696 	readl(&regs->outbound_doorbell_clear);
697 
698 	return mfiStatus;
699 }
700 
701 /**
702  * megasas_fire_cmd_ppc -	Sends command to the FW
703  * @instance:		Adapter soft state
704  * @frame_phys_addr:	Physical address of cmd
705  * @frame_count:	Number of frames for the command
706  * @regs:		MFI register set
707  */
708 static inline void
709 megasas_fire_cmd_ppc(struct megasas_instance *instance,
710 		dma_addr_t frame_phys_addr,
711 		u32 frame_count,
712 		struct megasas_register_set __iomem *regs)
713 {
714 	unsigned long flags;
715 
716 	spin_lock_irqsave(&instance->hba_lock, flags);
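	/*
	 * Post the frame: the frame count (shifted left by one) and bit 0
	 * are packed into the low bits of the frame address before writing
	 * to the inbound queue port.
	 */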
717 	writel((frame_phys_addr | (frame_count<<1))|1,
718 			&(regs)->inbound_queue_port);
719 	spin_unlock_irqrestore(&instance->hba_lock, flags);
720 }
721 
722 /**
723  * megasas_check_reset_ppc -	For controller reset check
724  * @instance:	Adapter soft state
725  * @regs:	MFI register set
726  */
727 static int
728 megasas_check_reset_ppc(struct megasas_instance *instance,
729 			struct megasas_register_set __iomem *regs)
730 {
731 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
732 		return 1;
733 
734 	return 0;
735 }
736 
737 static struct megasas_instance_template megasas_instance_template_ppc = {
738 
739 	.fire_cmd = megasas_fire_cmd_ppc,
740 	.enable_intr = megasas_enable_intr_ppc,
741 	.disable_intr = megasas_disable_intr_ppc,
742 	.clear_intr = megasas_clear_intr_ppc,
743 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
744 	.adp_reset = megasas_adp_reset_xscale,
745 	.check_reset = megasas_check_reset_ppc,
746 	.service_isr = megasas_isr,
747 	.tasklet = megasas_complete_cmd_dpc,
748 	.init_adapter = megasas_init_adapter_mfi,
749 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
750 	.issue_dcmd = megasas_issue_dcmd,
751 };
752 
753 /**
754  * megasas_enable_intr_skinny -	Enables interrupts
755  * @instance:	Adapter soft state
756  */
757 static inline void
758 megasas_enable_intr_skinny(struct megasas_instance *instance)
759 {
760 	struct megasas_register_set __iomem *regs;
761 
762 	regs = instance->reg_set;
763 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
764 
765 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
766 
767 	/* Dummy readl to force pci flush */
768 	readl(&regs->outbound_intr_mask);
769 }
770 
771 /**
772  * megasas_disable_intr_skinny -	Disables interrupt
773  * @instance:	Adapter soft state
774  */
775 static inline void
776 megasas_disable_intr_skinny(struct megasas_instance *instance)
777 {
778 	struct megasas_register_set __iomem *regs;
779 	u32 mask = 0xFFFFFFFF;
780 
781 	regs = instance->reg_set;
782 	writel(mask, &regs->outbound_intr_mask);
783 	/* Dummy readl to force pci flush */
784 	readl(&regs->outbound_intr_mask);
785 }
786 
787 /**
788  * megasas_read_fw_status_reg_skinny - returns the current FW status value
789  * @instance:	Adapter soft state
790  */
791 static u32
792 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
793 {
794 	return readl(&instance->reg_set->outbound_scratch_pad_0);
795 }
796 
797 /**
798  * megasas_clear_intr_skinny -	Check & clear interrupt
799  * @instance:	Adapter soft state
800  */
801 static int
802 megasas_clear_intr_skinny(struct megasas_instance *instance)
803 {
804 	u32 status;
805 	u32 mfiStatus = 0;
806 	struct megasas_register_set __iomem *regs;
807 	regs = instance->reg_set;
808 
809 	/*
810 	 * Check if it is our interrupt
811 	 */
812 	status = readl(&regs->outbound_intr_status);
813 
814 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
815 		return 0;
816 	}
817 
818 	/*
819 	 * Check whether the firmware has moved to the FAULT state
820 	 */
821 	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
822 	    MFI_STATE_FAULT) {
823 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
824 	} else
825 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
826 
827 	/*
828 	 * Clear the interrupt by writing back the same value
829 	 */
830 	writel(status, &regs->outbound_intr_status);
831 
832 	/*
833 	 * dummy read to flush PCI
834 	 */
835 	readl(&regs->outbound_intr_status);
836 
837 	return mfiStatus;
838 }
839 
840 /**
841  * megasas_fire_cmd_skinny -	Sends command to the FW
842  * @instance:		Adapter soft state
843  * @frame_phys_addr:	Physical address of cmd
844  * @frame_count:	Number of frames for the command
845  * @regs:		MFI register set
846  */
847 static inline void
848 megasas_fire_cmd_skinny(struct megasas_instance *instance,
849 			dma_addr_t frame_phys_addr,
850 			u32 frame_count,
851 			struct megasas_register_set __iomem *regs)
852 {
853 	unsigned long flags;
854 
855 	spin_lock_irqsave(&instance->hba_lock, flags);
856 	writel(upper_32_bits(frame_phys_addr),
857 	       &(regs)->inbound_high_queue_port);
858 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
859 	       &(regs)->inbound_low_queue_port);
860 	spin_unlock_irqrestore(&instance->hba_lock, flags);
861 }
862 
863 /**
864  * megasas_check_reset_skinny -	For controller reset check
865  * @instance:	Adapter soft state
866  * @regs:	MFI register set
867  */
868 static int
869 megasas_check_reset_skinny(struct megasas_instance *instance,
870 				struct megasas_register_set __iomem *regs)
871 {
872 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
873 		return 1;
874 
875 	return 0;
876 }
877 
878 static struct megasas_instance_template megasas_instance_template_skinny = {
879 
880 	.fire_cmd = megasas_fire_cmd_skinny,
881 	.enable_intr = megasas_enable_intr_skinny,
882 	.disable_intr = megasas_disable_intr_skinny,
883 	.clear_intr = megasas_clear_intr_skinny,
884 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
885 	.adp_reset = megasas_adp_reset_gen2,
886 	.check_reset = megasas_check_reset_skinny,
887 	.service_isr = megasas_isr,
888 	.tasklet = megasas_complete_cmd_dpc,
889 	.init_adapter = megasas_init_adapter_mfi,
890 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
891 	.issue_dcmd = megasas_issue_dcmd,
892 };
893 
894 
895 /*
896  * The following functions are defined for gen2 (deviceid : 0x78 0x79)
897  * controllers
898  */
899 
900 /**
901  * megasas_enable_intr_gen2 -  Enables interrupts
902  * @instance:	Adapter soft state
903  */
904 static inline void
905 megasas_enable_intr_gen2(struct megasas_instance *instance)
906 {
907 	struct megasas_register_set __iomem *regs;
908 
909 	regs = instance->reg_set;
910 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
911 
912 	/* write ~0x00000005 (4 & 1) to the intr mask */
913 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
914 
915 	/* Dummy readl to force pci flush */
916 	readl(&regs->outbound_intr_mask);
917 }
918 
919 /**
920  * megasas_disable_intr_gen2 - Disables interrupt
921  * @instance:	Adapter soft state
922  */
923 static inline void
924 megasas_disable_intr_gen2(struct megasas_instance *instance)
925 {
926 	struct megasas_register_set __iomem *regs;
927 	u32 mask = 0xFFFFFFFF;
928 
929 	regs = instance->reg_set;
930 	writel(mask, &regs->outbound_intr_mask);
931 	/* Dummy readl to force pci flush */
932 	readl(&regs->outbound_intr_mask);
933 }
934 
935 /**
936  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
937  * @instance:	Adapter soft state
938  */
939 static u32
940 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
941 {
942 	return readl(&instance->reg_set->outbound_scratch_pad_0);
943 }
944 
945 /**
946  * megasas_clear_intr_gen2 -      Check & clear interrupt
947  * @instance:	Adapter soft state
948  */
949 static int
950 megasas_clear_intr_gen2(struct megasas_instance *instance)
951 {
952 	u32 status;
953 	u32 mfiStatus = 0;
954 	struct megasas_register_set __iomem *regs;
955 	regs = instance->reg_set;
956 
957 	/*
958 	 * Check if it is our interrupt
959 	 */
960 	status = readl(&regs->outbound_intr_status);
961 
962 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
963 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
964 	}
965 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
966 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
967 	}
968 
969 	/*
970 	 * Clear the interrupt by writing back the same value
971 	 */
972 	if (mfiStatus)
973 		writel(status, &regs->outbound_doorbell_clear);
974 
975 	/* Dummy readl to force pci flush */
976 	readl(&regs->outbound_intr_status);
977 
978 	return mfiStatus;
979 }
980 
981 /**
982  * megasas_fire_cmd_gen2 -     Sends command to the FW
983  * @instance:		Adapter soft state
984  * @frame_phys_addr:	Physical address of cmd
985  * @frame_count:	Number of frames for the command
986  * @regs:		MFI register set
987  */
988 static inline void
989 megasas_fire_cmd_gen2(struct megasas_instance *instance,
990 			dma_addr_t frame_phys_addr,
991 			u32 frame_count,
992 			struct megasas_register_set __iomem *regs)
993 {
994 	unsigned long flags;
995 
996 	spin_lock_irqsave(&instance->hba_lock, flags);
997 	writel((frame_phys_addr | (frame_count<<1))|1,
998 			&(regs)->inbound_queue_port);
999 	spin_unlock_irqrestore(&instance->hba_lock, flags);
1000 }
1001 
1002 /**
1003  * megasas_adp_reset_gen2 -	For controller reset
1004  * @instance:	Adapter soft state
1005  * @reg_set:	MFI register set
1006  */
1007 static int
1008 megasas_adp_reset_gen2(struct megasas_instance *instance,
1009 			struct megasas_register_set __iomem *reg_set)
1010 {
1011 	u32 retry = 0 ;
1012 	u32 HostDiag;
1013 	u32 __iomem *seq_offset = &reg_set->seq_offset;
1014 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
1015 
1016 	if (instance->instancet == &megasas_instance_template_skinny) {
1017 		seq_offset = &reg_set->fusion_seq_offset;
1018 		hostdiag_offset = &reg_set->fusion_host_diag;
1019 	}
1020 
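	/*
	 * Write the fixed sequence below to the sequence register
	 * (presumably the firmware's diag write-enable key), then poll the
	 * host diag register until DIAG_WRITE_ENABLE is set.
	 */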
1021 	writel(0, seq_offset);
1022 	writel(4, seq_offset);
1023 	writel(0xb, seq_offset);
1024 	writel(2, seq_offset);
1025 	writel(7, seq_offset);
1026 	writel(0xd, seq_offset);
1027 
1028 	msleep(1000);
1029 
1030 	HostDiag = (u32)readl(hostdiag_offset);
1031 
1032 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1033 		msleep(100);
1034 		HostDiag = (u32)readl(hostdiag_offset);
1035 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1036 					retry, HostDiag);
1037 
1038 		if (retry++ >= 100)
1039 			return 1;
1040 
1041 	}
1042 
1043 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1044 
1045 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1046 
1047 	ssleep(10);
1048 
1049 	HostDiag = (u32)readl(hostdiag_offset);
1050 	while (HostDiag & DIAG_RESET_ADAPTER) {
1051 		msleep(100);
1052 		HostDiag = (u32)readl(hostdiag_offset);
1053 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1054 				retry, HostDiag);
1055 
1056 		if (retry++ >= 1000)
1057 			return 1;
1058 
1059 	}
1060 	return 0;
1061 }
1062 
1063 /**
1064  * megasas_check_reset_gen2 -	For controller reset check
1065  * @instance:	Adapter soft state
1066  * @regs:	MFI register set
1067  */
1068 static int
1069 megasas_check_reset_gen2(struct megasas_instance *instance,
1070 		struct megasas_register_set __iomem *regs)
1071 {
1072 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1073 		return 1;
1074 
1075 	return 0;
1076 }
1077 
1078 static struct megasas_instance_template megasas_instance_template_gen2 = {
1079 
1080 	.fire_cmd = megasas_fire_cmd_gen2,
1081 	.enable_intr = megasas_enable_intr_gen2,
1082 	.disable_intr = megasas_disable_intr_gen2,
1083 	.clear_intr = megasas_clear_intr_gen2,
1084 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1085 	.adp_reset = megasas_adp_reset_gen2,
1086 	.check_reset = megasas_check_reset_gen2,
1087 	.service_isr = megasas_isr,
1088 	.tasklet = megasas_complete_cmd_dpc,
1089 	.init_adapter = megasas_init_adapter_mfi,
1090 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1091 	.issue_dcmd = megasas_issue_dcmd,
1092 };
1093 
1094 /*
1095  * This is the end of set of functions & definitions
1096  * specific to gen2 (deviceid : 0x78, 0x79) controllers
1097  */
1098 
1099 /*
1100  * Template added for TB (Fusion)
1101  */
1102 extern struct megasas_instance_template megasas_instance_template_fusion;
1103 
1104 /**
1105  * megasas_issue_polled -	Issues a polling command
1106  * @instance:			Adapter soft state
1107  * @cmd:			Command packet to be issued
1108  *
1109  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1110  */
1111 int
1112 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1113 {
1114 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1115 
1116 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1117 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1118 
1119 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1120 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1121 			__func__, __LINE__);
1122 		return DCMD_INIT;
1123 	}
1124 
1125 	instance->instancet->issue_dcmd(instance, cmd);
1126 
1127 	return wait_and_poll(instance, cmd, instance->requestorId ?
1128 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1129 }
1130 
1131 /**
1132  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1133  * @instance:			Adapter soft state
1134  * @cmd:			Command to be issued
1135  * @timeout:			Timeout in seconds
1136  *
1137  * This function waits on an event for the command to be returned from ISR.
1138  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs.
1139  * Used to issue ioctl commands.
1140  */
1141 int
1142 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1143 			  struct megasas_cmd *cmd, int timeout)
1144 {
1145 	int ret = 0;
1146 	cmd->cmd_status_drv = DCMD_INIT;
1147 
1148 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1149 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1150 			__func__, __LINE__);
1151 		return DCMD_INIT;
1152 	}
1153 
1154 	instance->instancet->issue_dcmd(instance, cmd);
1155 
1156 	if (timeout) {
1157 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1158 		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1159 		if (!ret) {
1160 			dev_err(&instance->pdev->dev,
1161 				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
1162 				cmd->frame->dcmd.opcode, __func__);
1163 			return DCMD_TIMEOUT;
1164 		}
1165 	} else
1166 		wait_event(instance->int_cmd_wait_q,
1167 				cmd->cmd_status_drv != DCMD_INIT);
1168 
1169 	return cmd->cmd_status_drv;
1170 }
1171 
1172 /**
1173  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1174  * @instance:				Adapter soft state
1175  * @cmd_to_abort:			Previously issued cmd to be aborted
1176  * @timeout:				Timeout in seconds
1177  *
1178  * MFI firmware can abort a previously issued AEN command (automatic event
1179  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1180  * cmd and waits for return status.
1181  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1182  */
1183 static int
1184 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1185 				struct megasas_cmd *cmd_to_abort, int timeout)
1186 {
1187 	struct megasas_cmd *cmd;
1188 	struct megasas_abort_frame *abort_fr;
1189 	int ret = 0;
1190 	u32 opcode;
1191 
1192 	cmd = megasas_get_cmd(instance);
1193 
1194 	if (!cmd)
1195 		return -1;
1196 
1197 	abort_fr = &cmd->frame->abort;
1198 
1199 	/*
1200 	 * Prepare and issue the abort frame
1201 	 */
1202 	abort_fr->cmd = MFI_CMD_ABORT;
1203 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1204 	abort_fr->flags = cpu_to_le16(0);
1205 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1206 	abort_fr->abort_mfi_phys_addr_lo =
1207 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1208 	abort_fr->abort_mfi_phys_addr_hi =
1209 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1210 
1211 	cmd->sync_cmd = 1;
1212 	cmd->cmd_status_drv = DCMD_INIT;
1213 
1214 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1215 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1216 			__func__, __LINE__);
1217 		return DCMD_INIT;
1218 	}
1219 
1220 	instance->instancet->issue_dcmd(instance, cmd);
1221 
1222 	if (timeout) {
1223 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1224 		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1225 		if (!ret) {
1226 			opcode = cmd_to_abort->frame->dcmd.opcode;
1227 			dev_err(&instance->pdev->dev,
1228 				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1229 				opcode,  __func__);
1230 			return DCMD_TIMEOUT;
1231 		}
1232 	} else
1233 		wait_event(instance->abort_cmd_wait_q,
1234 		cmd->cmd_status_drv != DCMD_INIT);
1235 
1236 	cmd->sync_cmd = 0;
1237 
1238 	megasas_return_cmd(instance, cmd);
1239 	return cmd->cmd_status_drv;
1240 }
1241 
1242 /**
1243  * megasas_make_sgl32 -	Prepares 32-bit SGL
1244  * @instance:		Adapter soft state
1245  * @scp:		SCSI command from the mid-layer
1246  * @mfi_sgl:		SGL to be filled in
1247  *
1248  * If successful, this function returns the number of SG elements. Otherwise,
1249  * it returns -1.
1250  */
1251 static int
1252 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1253 		   union megasas_sgl *mfi_sgl)
1254 {
1255 	int i;
1256 	int sge_count;
1257 	struct scatterlist *os_sgl;
1258 
1259 	sge_count = scsi_dma_map(scp);
1260 	BUG_ON(sge_count < 0);
1261 
1262 	if (sge_count) {
1263 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1264 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1265 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1266 		}
1267 	}
1268 	return sge_count;
1269 }
1270 
1271 /**
1272  * megasas_make_sgl64 -	Prepares 64-bit SGL
1273  * @instance:		Adapter soft state
1274  * @scp:		SCSI command from the mid-layer
1275  * @mfi_sgl:		SGL to be filled in
1276  *
1277  * If successful, this function returns the number of SG elements. Otherwise,
1278  * it returns -1.
1279  */
1280 static int
1281 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1282 		   union megasas_sgl *mfi_sgl)
1283 {
1284 	int i;
1285 	int sge_count;
1286 	struct scatterlist *os_sgl;
1287 
1288 	sge_count = scsi_dma_map(scp);
1289 	BUG_ON(sge_count < 0);
1290 
1291 	if (sge_count) {
1292 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1293 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1294 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1295 		}
1296 	}
1297 	return sge_count;
1298 }
1299 
1300 /**
1301  * megasas_make_sgl_skinny - Prepares IEEE SGL
1302  * @instance:           Adapter soft state
1303  * @scp:                SCSI command from the mid-layer
1304  * @mfi_sgl:            SGL to be filled in
1305  *
1306  * If successful, this function returns the number of SG elements. Otherwise,
1307  * it returns -1.
1308  */
1309 static int
1310 megasas_make_sgl_skinny(struct megasas_instance *instance,
1311 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1312 {
1313 	int i;
1314 	int sge_count;
1315 	struct scatterlist *os_sgl;
1316 
1317 	sge_count = scsi_dma_map(scp);
1318 
1319 	if (sge_count) {
1320 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1321 			mfi_sgl->sge_skinny[i].length =
1322 				cpu_to_le32(sg_dma_len(os_sgl));
1323 			mfi_sgl->sge_skinny[i].phys_addr =
1324 				cpu_to_le64(sg_dma_address(os_sgl));
1325 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1326 		}
1327 	}
1328 	return sge_count;
1329 }
1330 
1331  /**
1332  * megasas_get_frame_count - Computes the number of frames
1333  * @frame_type		: type of frame- io or pthru frame
1334  * @sge_count		: number of sg elements
1335  *
1336  * Returns the number of frames required for the given number of SGEs (sge_count)
1337  */
1338 
1339 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1340 			u8 sge_count, u8 frame_type)
1341 {
1342 	int num_cnt;
1343 	int sge_bytes;
1344 	u32 sge_sz;
1345 	u32 frame_count = 0;
1346 
1347 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1348 	    sizeof(struct megasas_sge32);
1349 
1350 	if (instance->flag_ieee) {
1351 		sge_sz = sizeof(struct megasas_sge_skinny);
1352 	}
1353 
1354 	/*
1355 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1356 	 * 3 SGEs for 32-bit SGLs for ldio &
1357 	 * 1 SGE for 64-bit SGLs and
1358 	 * 2 SGEs for 32-bit SGLs for pthru frame
1359 	 */
1360 	if (unlikely(frame_type == PTHRU_FRAME)) {
1361 		if (instance->flag_ieee == 1) {
1362 			num_cnt = sge_count - 1;
1363 		} else if (IS_DMA64)
1364 			num_cnt = sge_count - 1;
1365 		else
1366 			num_cnt = sge_count - 2;
1367 	} else {
1368 		if (instance->flag_ieee == 1) {
1369 			num_cnt = sge_count - 1;
1370 		} else if (IS_DMA64)
1371 			num_cnt = sge_count - 2;
1372 		else
1373 			num_cnt = sge_count - 3;
1374 	}
1375 
1376 	if (num_cnt > 0) {
1377 		sge_bytes = sge_sz * num_cnt;
1378 
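		/*
		 * Additional frames needed for the overflow SGEs: a ceiling
		 * division of sge_bytes by MEGAMFI_FRAME_SIZE.
		 */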
1379 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1380 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1381 	}
1382 	/* Main frame */
1383 	frame_count += 1;
1384 
1385 	if (frame_count > 7)
1386 		frame_count = 8;
1387 	return frame_count;
1388 }
1389 
1390 /**
1391  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1392  * @instance:		Adapter soft state
1393  * @scp:		SCSI command
1394  * @cmd:		Command to be prepared in
1395  *
1396  * This function prepares CDB commands. These are typically pass-through
1397  * commands to the devices.
1398  */
1399 static int
1400 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1401 		   struct megasas_cmd *cmd)
1402 {
1403 	u32 is_logical;
1404 	u32 device_id;
1405 	u16 flags = 0;
1406 	struct megasas_pthru_frame *pthru;
1407 
1408 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1409 	device_id = MEGASAS_DEV_INDEX(scp);
1410 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1411 
1412 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1413 		flags = MFI_FRAME_DIR_WRITE;
1414 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1415 		flags = MFI_FRAME_DIR_READ;
1416 	else if (scp->sc_data_direction == DMA_NONE)
1417 		flags = MFI_FRAME_DIR_NONE;
1418 
1419 	if (instance->flag_ieee == 1) {
1420 		flags |= MFI_FRAME_IEEE;
1421 	}
1422 
1423 	/*
1424 	 * Prepare the DCDB frame
1425 	 */
1426 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1427 	pthru->cmd_status = 0x0;
1428 	pthru->scsi_status = 0x0;
1429 	pthru->target_id = device_id;
1430 	pthru->lun = scp->device->lun;
1431 	pthru->cdb_len = scp->cmd_len;
1432 	pthru->timeout = 0;
1433 	pthru->pad_0 = 0;
1434 	pthru->flags = cpu_to_le16(flags);
1435 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1436 
1437 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1438 
1439 	/*
1440 	 * If the command is for the tape device, set the
1441 	 * pthru timeout to the os layer timeout value.
1442 	 */
1443 	if (scp->device->type == TYPE_TAPE) {
1444 		if ((scp->request->timeout / HZ) > 0xFFFF)
1445 			pthru->timeout = cpu_to_le16(0xFFFF);
1446 		else
1447 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1448 	}
1449 
1450 	/*
1451 	 * Construct SGL
1452 	 */
1453 	if (instance->flag_ieee == 1) {
1454 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1455 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1456 						      &pthru->sgl);
1457 	} else if (IS_DMA64) {
1458 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1459 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1460 						      &pthru->sgl);
1461 	} else
1462 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1463 						      &pthru->sgl);
1464 
1465 	if (pthru->sge_count > instance->max_num_sge) {
1466 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1467 			pthru->sge_count);
1468 		return 0;
1469 	}
1470 
1471 	/*
1472 	 * Sense info specific
1473 	 */
1474 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1475 	pthru->sense_buf_phys_addr_hi =
1476 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1477 	pthru->sense_buf_phys_addr_lo =
1478 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1479 
1480 	/*
1481 	 * Compute the total number of frames this command consumes. FW uses
1482 	 * this number to pull sufficient number of frames from host memory.
1483 	 */
1484 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1485 							PTHRU_FRAME);
1486 
1487 	return cmd->frame_count;
1488 }
1489 
1490 /**
1491  * megasas_build_ldio -	Prepares IOs to logical devices
1492  * @instance:		Adapter soft state
1493  * @scp:		SCSI command
1494  * @cmd:		Command to be prepared
1495  *
1496  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1497  */
1498 static int
1499 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1500 		   struct megasas_cmd *cmd)
1501 {
1502 	u32 device_id;
1503 	u8 sc = scp->cmnd[0];
1504 	u16 flags = 0;
1505 	struct megasas_io_frame *ldio;
1506 
1507 	device_id = MEGASAS_DEV_INDEX(scp);
1508 	ldio = (struct megasas_io_frame *)cmd->frame;
1509 
1510 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1511 		flags = MFI_FRAME_DIR_WRITE;
1512 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1513 		flags = MFI_FRAME_DIR_READ;
1514 
1515 	if (instance->flag_ieee == 1) {
1516 		flags |= MFI_FRAME_IEEE;
1517 	}
1518 
1519 	/*
1520 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1521 	 */
1522 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1523 	ldio->cmd_status = 0x0;
1524 	ldio->scsi_status = 0x0;
1525 	ldio->target_id = device_id;
1526 	ldio->timeout = 0;
1527 	ldio->reserved_0 = 0;
1528 	ldio->pad_0 = 0;
1529 	ldio->flags = cpu_to_le16(flags);
1530 	ldio->start_lba_hi = 0;
1531 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1532 
1533 	/*
1534 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1535 	 */
1536 	if (scp->cmd_len == 6) {
1537 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1538 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1539 						 ((u32) scp->cmnd[2] << 8) |
1540 						 (u32) scp->cmnd[3]);
1541 
1542 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1543 	}
1544 
1545 	/*
1546 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1547 	 */
1548 	else if (scp->cmd_len == 10) {
1549 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1550 					      ((u32) scp->cmnd[7] << 8));
1551 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1552 						 ((u32) scp->cmnd[3] << 16) |
1553 						 ((u32) scp->cmnd[4] << 8) |
1554 						 (u32) scp->cmnd[5]);
1555 	}
1556 
1557 	/*
1558 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1559 	 */
1560 	else if (scp->cmd_len == 12) {
1561 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1562 					      ((u32) scp->cmnd[7] << 16) |
1563 					      ((u32) scp->cmnd[8] << 8) |
1564 					      (u32) scp->cmnd[9]);
1565 
1566 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1567 						 ((u32) scp->cmnd[3] << 16) |
1568 						 ((u32) scp->cmnd[4] << 8) |
1569 						 (u32) scp->cmnd[5]);
1570 	}
1571 
1572 	/*
1573 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1574 	 */
1575 	else if (scp->cmd_len == 16) {
1576 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1577 					      ((u32) scp->cmnd[11] << 16) |
1578 					      ((u32) scp->cmnd[12] << 8) |
1579 					      (u32) scp->cmnd[13]);
1580 
1581 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1582 						 ((u32) scp->cmnd[7] << 16) |
1583 						 ((u32) scp->cmnd[8] << 8) |
1584 						 (u32) scp->cmnd[9]);
1585 
1586 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1587 						 ((u32) scp->cmnd[3] << 16) |
1588 						 ((u32) scp->cmnd[4] << 8) |
1589 						 (u32) scp->cmnd[5]);
1590 
1591 	}
1592 
1593 	/*
1594 	 * Construct SGL
1595 	 */
1596 	if (instance->flag_ieee) {
1597 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1598 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1599 					      &ldio->sgl);
1600 	} else if (IS_DMA64) {
1601 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1602 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1603 	} else
1604 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1605 
1606 	if (ldio->sge_count > instance->max_num_sge) {
1607 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1608 			ldio->sge_count);
1609 		return 0;
1610 	}
1611 
1612 	/*
1613 	 * Sense info specific
1614 	 */
1615 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1616 	ldio->sense_buf_phys_addr_hi = 0;
1617 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1618 
1619 	/*
1620 	 * Compute the total number of frames this command consumes. FW uses
1621 	 * this number to pull sufficient number of frames from host memory.
1622 	 */
1623 	cmd->frame_count = megasas_get_frame_count(instance,
1624 			ldio->sge_count, IO_FRAME);
1625 
1626 	return cmd->frame_count;
1627 }
1628 
1629 /**
1630  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1631  *				and whether it's RW or non RW
1632  * @cmd:			SCSI command
1633  *
1634  */
1635 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1636 {
1637 	int ret;
1638 
1639 	switch (cmd->cmnd[0]) {
1640 	case READ_10:
1641 	case WRITE_10:
1642 	case READ_12:
1643 	case WRITE_12:
1644 	case READ_6:
1645 	case WRITE_6:
1646 	case READ_16:
1647 	case WRITE_16:
1648 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1649 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1650 		break;
1651 	default:
1652 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1653 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1654 	}
1655 	return ret;
1656 }
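/*
 * Illustrative example (not driver code): READ_10 (0x28) to a device for
 * which MEGASAS_IS_LOGICAL() is true classifies as READ_WRITE_LDIO, the
 * same opcode to a system PD as READ_WRITE_SYSPDIO, and any other opcode,
 * e.g. INQUIRY (0x12), falls into the corresponding NON_READ_WRITE_*
 * category.
 */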
1657 
1658 /**
1659  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1660  *					in FW
1661  * @instance:				Adapter soft state
1662  */
1663 static inline void
1664 megasas_dump_pending_frames(struct megasas_instance *instance)
1665 {
1666 	struct megasas_cmd *cmd;
1667 	int i,n;
1668 	union megasas_sgl *mfi_sgl;
1669 	struct megasas_io_frame *ldio;
1670 	struct megasas_pthru_frame *pthru;
1671 	u32 sgcount;
1672 	u16 max_cmd = instance->max_fw_cmds;
1673 
1674 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1675 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1676 	if (IS_DMA64)
1677 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1678 	else
1679 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1680 
1681 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1682 	for (i = 0; i < max_cmd; i++) {
1683 		cmd = instance->cmd_list[i];
1684 		if (!cmd->scmd)
1685 			continue;
1686 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1687 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1688 			ldio = (struct megasas_io_frame *)cmd->frame;
1689 			mfi_sgl = &ldio->sgl;
1690 			sgcount = ldio->sge_count;
1691 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1692 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1693 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1694 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1695 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1696 		} else {
1697 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1698 			mfi_sgl = &pthru->sgl;
1699 			sgcount = pthru->sge_count;
1700 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1701 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1702 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1703 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1704 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1705 		}
1706 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1707 			for (n = 0; n < sgcount; n++) {
1708 				if (IS_DMA64)
1709 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1710 						le32_to_cpu(mfi_sgl->sge64[n].length),
1711 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1712 				else
1713 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1714 						le32_to_cpu(mfi_sgl->sge32[n].length),
1715 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1716 			}
1717 		}
1718 	} /*for max_cmd*/
1719 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1720 	for (i = 0; i < max_cmd; i++) {
1721 
1722 		cmd = instance->cmd_list[i];
1723 
1724 		if (cmd->sync_cmd == 1)
1725 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1726 	}
1727 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1728 }
1729 
1730 u32
1731 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1732 			    struct scsi_cmnd *scmd)
1733 {
1734 	struct megasas_cmd *cmd;
1735 	u32 frame_count;
1736 
1737 	cmd = megasas_get_cmd(instance);
1738 	if (!cmd)
1739 		return SCSI_MLQUEUE_HOST_BUSY;
1740 
1741 	/*
1742 	 * Logical drive command
1743 	 */
1744 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1745 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1746 	else
1747 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1748 
1749 	if (!frame_count)
1750 		goto out_return_cmd;
1751 
1752 	cmd->scmd = scmd;
1753 	scmd->SCp.ptr = (char *)cmd;
1754 
1755 	/*
1756 	 * Issue the command to the FW
1757 	 */
1758 	atomic_inc(&instance->fw_outstanding);
1759 
1760 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1761 				cmd->frame_count-1, instance->reg_set);
1762 
1763 	return 0;
1764 out_return_cmd:
1765 	megasas_return_cmd(instance, cmd);
1766 	return SCSI_MLQUEUE_HOST_BUSY;
1767 }
1768 
1769 
1770 /**
1771  * megasas_queue_command -	Queue entry point
1772  * @shost:			adapter SCSI host
1773  * @scmd:			SCSI command to be queued
1774  */
1775 static int
1776 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1777 {
1778 	struct megasas_instance *instance;
1779 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1780 	u32 ld_tgt_id;
1781 
1782 	instance = (struct megasas_instance *)
1783 	    scmd->device->host->hostdata;
1784 
1785 	if (instance->unload == 1) {
1786 		scmd->result = DID_NO_CONNECT << 16;
1787 		scmd->scsi_done(scmd);
1788 		return 0;
1789 	}
1790 
1791 	if (instance->issuepend_done == 0)
1792 		return SCSI_MLQUEUE_HOST_BUSY;
1793 
1794 
1795 	/* Check for an mpio path and adjust behavior */
1796 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1797 		if (megasas_check_mpio_paths(instance, scmd) ==
1798 		    (DID_REQUEUE << 16)) {
1799 			return SCSI_MLQUEUE_HOST_BUSY;
1800 		} else {
1801 			scmd->result = DID_NO_CONNECT << 16;
1802 			scmd->scsi_done(scmd);
1803 			return 0;
1804 		}
1805 	}
1806 
1807 	mr_device_priv_data = scmd->device->hostdata;
1808 	if (!mr_device_priv_data ||
1809 	    (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
1810 		scmd->result = DID_NO_CONNECT << 16;
1811 		scmd->scsi_done(scmd);
1812 		return 0;
1813 	}
1814 
1815 	if (MEGASAS_IS_LOGICAL(scmd->device)) {
1816 		ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
1817 		if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
1818 			scmd->result = DID_NO_CONNECT << 16;
1819 			scmd->scsi_done(scmd);
1820 			return 0;
1821 		}
1822 	}
1823 
1824 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1825 		return SCSI_MLQUEUE_HOST_BUSY;
1826 
1827 	if (mr_device_priv_data->tm_busy)
1828 		return SCSI_MLQUEUE_DEVICE_BUSY;
1829 
1830 
1831 	scmd->result = 0;
1832 
1833 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1834 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1835 		scmd->device->lun)) {
1836 		scmd->result = DID_BAD_TARGET << 16;
1837 		goto out_done;
1838 	}
1839 
1840 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1841 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1842 	    (!instance->fw_sync_cache_support)) {
1843 		scmd->result = DID_OK << 16;
1844 		goto out_done;
1845 	}
1846 
1847 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1848 
1849  out_done:
1850 	scmd->scsi_done(scmd);
1851 	return 0;
1852 }
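/*
 * Summary of the gating above: commands are completed with DID_NO_CONNECT
 * while the driver is unloading, when device private data is missing, on
 * MEGASAS_HW_CRITICAL_ERROR, or when the LD target id has been deleted;
 * they are returned with SCSI_MLQUEUE_HOST_BUSY while issuepend_done is
 * clear or the adapter is not operational; during an adapter-reset fault
 * the command is requeued or failed depending on megasas_check_mpio_paths();
 * a device with task management in flight gets SCSI_MLQUEUE_DEVICE_BUSY;
 * an LD id beyond fw_supported_vd_count or a non-zero LUN gets
 * DID_BAD_TARGET; and SYNCHRONIZE_CACHE to an LD is completed as a no-op
 * when fw_sync_cache_support is not set. Everything else goes to
 * build_and_issue_cmd().
 */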
1853 
1854 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1855 {
1856 	int i;
1857 
1858 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1859 
1860 		if ((megasas_mgmt_info.instance[i]) &&
1861 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1862 			return megasas_mgmt_info.instance[i];
1863 	}
1864 
1865 	return NULL;
1866 }
1867 
1868 /*
1869 * megasas_set_dynamic_target_properties -
1870 * Device properties set by the driver may not be static and need to be
1871 * updated after an OCR.
1872 *
1873 * set tm_capable.
1874 * set dma alignment (only for EEDP-protection-enabled VDs).
1875 *
1876 * @sdev: OS provided scsi device
1877 *
1878 * Returns void
1879 */
1880 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1881 					   bool is_target_prop)
1882 {
1883 	u16 pd_index = 0, ld;
1884 	u32 device_id;
1885 	struct megasas_instance *instance;
1886 	struct fusion_context *fusion;
1887 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1888 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1889 	struct MR_LD_RAID *raid;
1890 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1891 
1892 	instance = megasas_lookup_instance(sdev->host->host_no);
1893 	fusion = instance->ctrl_context;
1894 	mr_device_priv_data = sdev->hostdata;
1895 
1896 	if (!fusion || !mr_device_priv_data)
1897 		return;
1898 
1899 	if (MEGASAS_IS_LOGICAL(sdev)) {
1900 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1901 					+ sdev->id;
1902 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1903 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1904 		if (ld >= instance->fw_supported_vd_count)
1905 			return;
1906 		raid = MR_LdRaidGet(ld, local_map_ptr);
1907 
1908 		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1909 		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1910 
1911 		mr_device_priv_data->is_tm_capable =
1912 			raid->capability.tmCapable;
1913 
1914 		if (!raid->flags.isEPD)
1915 			sdev->no_write_same = 1;
1916 
1917 	} else if (instance->use_seqnum_jbod_fp) {
1918 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1919 			sdev->id;
1920 		pd_sync = (void *)fusion->pd_seq_sync
1921 				[(instance->pd_seq_map_id - 1) & 1];
1922 		mr_device_priv_data->is_tm_capable =
1923 			pd_sync->seq[pd_index].capability.tmCapable;
1924 	}
1925 
1926 	if (is_target_prop && instance->tgt_prop->reset_tmo) {
1927 		/*
1928 		 * If FW provides a target reset timeout value, driver will use
1929 		 * it. If not set, fallback to default values.
1930 		 */
1931 		mr_device_priv_data->target_reset_tmo =
1932 			min_t(u8, instance->max_reset_tmo,
1933 			      instance->tgt_prop->reset_tmo);
1934 		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1935 	} else {
1936 		mr_device_priv_data->target_reset_tmo =
1937 						MEGASAS_DEFAULT_TM_TIMEOUT;
1938 		mr_device_priv_data->task_abort_tmo =
1939 						MEGASAS_DEFAULT_TM_TIMEOUT;
1940 	}
1941 }
1942 
1943 /*
1944  * megasas_set_nvme_device_properties -
1945  * set nomerges=2
1946  * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1947  * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1948  *
1949  * MR firmware provides the value in KB. The caller of this function
1950  * converts KB into bytes.
1951  *
1952  * e.g. MDTS=5 means 2^5 * NVMe page size. In case of a 4K page size,
1953  * MR firmware provides the value 128, since 32 * 4K = 128K.
1954  *
1955  * @sdev:				scsi device
1956  * @max_io_size:				maximum io transfer size
1957  *
1958  */
1959 static inline void
1960 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1961 {
1962 	struct megasas_instance *instance;
1963 	u32 mr_nvme_pg_size;
1964 
1965 	instance = (struct megasas_instance *)sdev->host->hostdata;
1966 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1967 				MR_DEFAULT_NVME_PAGE_SIZE);
1968 
1969 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1970 
1971 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1972 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1973 }
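/*
 * Worked example (assumed values, not driver code): if the FW reports
 * max_io_size_kb = 128 (an MDTS of 2^5 NVMe pages with a 4K page),
 * megasas_set_static_target_properties() passes 128 << 10 = 131072 bytes
 * here, so blk_queue_max_hw_sectors() is called with 131072 / 512 = 256
 * sectors and the virt boundary becomes mr_nvme_pg_size - 1 = 0xFFF for a
 * 4K NVMe page.
 */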
1974 
1975 /*
1976  * megasas_set_fw_assisted_qd -
1977  * Set the device queue depth to the FW-assisted queue depth when provided,
1978  * otherwise to an interface-type default (or can_queue if sdev_max_qd is set).
1979  *
1980  * @sdev:				scsi device
1981  * @is_target_prop			true, if fw provided target properties.
1982  */
1983 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1984 						 bool is_target_prop)
1985 {
1986 	u8 interface_type;
1987 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1988 	u32 tgt_device_qd;
1989 	struct megasas_instance *instance;
1990 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1991 
1992 	instance = megasas_lookup_instance(sdev->host->host_no);
1993 	mr_device_priv_data = sdev->hostdata;
1994 	interface_type  = mr_device_priv_data->interface_type;
1995 
1996 	switch (interface_type) {
1997 	case SAS_PD:
1998 		device_qd = MEGASAS_SAS_QD;
1999 		break;
2000 	case SATA_PD:
2001 		device_qd = MEGASAS_SATA_QD;
2002 		break;
2003 	case NVME_PD:
2004 		device_qd = MEGASAS_NVME_QD;
2005 		break;
2006 	}
2007 
2008 	if (is_target_prop) {
2009 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
2010 		if (tgt_device_qd)
2011 			device_qd = min(instance->host->can_queue,
2012 					(int)tgt_device_qd);
2013 	}
2014 
2015 	if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
2016 		device_qd = instance->host->can_queue;
2017 
2018 	scsi_change_queue_depth(sdev, device_qd);
2019 }
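/*
 * Worked example (assumed values, not driver code): for an NVME_PD the
 * default is MEGASAS_NVME_QD; if the FW also reports device_qdepth = 64
 * while host->can_queue is, say, 1024, the final depth is min(1024, 64) =
 * 64. When enable_sdev_max_qd is set and the interface type is known, the
 * depth is raised to can_queue regardless.
 */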
2020 
2021 /*
2022  * megasas_set_static_target_properties -
2023  * Device properties set by the driver are static and do not need to be
2024  * updated after an OCR.
2025  *
2026  * set io timeout
2027  * set device queue depth
2028  * set nvme device properties. see - megasas_set_nvme_device_properties
2029  *
2030  * @sdev:				scsi device
2031  * @is_target_prop			true, if fw provided target properties.
2032  */
2033 static void megasas_set_static_target_properties(struct scsi_device *sdev,
2034 						 bool is_target_prop)
2035 {
2036 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2037 	struct megasas_instance *instance;
2038 
2039 	instance = megasas_lookup_instance(sdev->host->host_no);
2040 
2041 	/*
2042 	 * The RAID firmware may require extended timeouts.
2043 	 */
2044 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2045 
2046 	/* max_io_size_kb will be set to a non-zero value for
2047 	 * NVMe-based VDs and system PDs.
2048 	 */
2049 	if (is_target_prop)
2050 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
2051 
2052 	if (instance->nvme_page_size && max_io_size_kb)
2053 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2054 
2055 	megasas_set_fw_assisted_qd(sdev, is_target_prop);
2056 }
2057 
2058 
2059 static int megasas_slave_configure(struct scsi_device *sdev)
2060 {
2061 	u16 pd_index = 0;
2062 	struct megasas_instance *instance;
2063 	int ret_target_prop = DCMD_FAILED;
2064 	bool is_target_prop = false;
2065 
2066 	instance = megasas_lookup_instance(sdev->host->host_no);
2067 	if (instance->pd_list_not_supported) {
2068 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2069 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2070 				sdev->id;
2071 			if (instance->pd_list[pd_index].driveState !=
2072 				MR_PD_STATE_SYSTEM)
2073 				return -ENXIO;
2074 		}
2075 	}
2076 
2077 	mutex_lock(&instance->reset_mutex);
2078 	/* Send DCMD to Firmware and cache the information */
2079 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2080 		megasas_get_pd_info(instance, sdev);
2081 
2082 	/* Some Ventura firmware may not have instance->nvme_page_size set.
2083 	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
2084 	 */
2085 	if ((instance->tgt_prop) && (instance->nvme_page_size))
2086 		ret_target_prop = megasas_get_target_prop(instance, sdev);
2087 
2088 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2089 	megasas_set_static_target_properties(sdev, is_target_prop);
2090 
2091 	/* This sdev property may change post OCR */
2092 	megasas_set_dynamic_target_properties(sdev, is_target_prop);
2093 
2094 	mutex_unlock(&instance->reset_mutex);
2095 
2096 	return 0;
2097 }
2098 
2099 static int megasas_slave_alloc(struct scsi_device *sdev)
2100 {
2101 	u16 pd_index = 0, ld_tgt_id;
2102 	struct megasas_instance *instance ;
2103 	struct MR_PRIV_DEVICE *mr_device_priv_data;
2104 
2105 	instance = megasas_lookup_instance(sdev->host->host_no);
2106 	if (!MEGASAS_IS_LOGICAL(sdev)) {
2107 		/*
2108 		 * Open the OS scan to the SYSTEM PD
2109 		 */
2110 		pd_index =
2111 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2112 			sdev->id;
2113 		if ((instance->pd_list_not_supported ||
2114 			instance->pd_list[pd_index].driveState ==
2115 			MR_PD_STATE_SYSTEM)) {
2116 			goto scan_target;
2117 		}
2118 		return -ENXIO;
2119 	}
2120 
2121 scan_target:
2122 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2123 					GFP_KERNEL);
2124 	if (!mr_device_priv_data)
2125 		return -ENOMEM;
2126 
2127 	if (MEGASAS_IS_LOGICAL(sdev)) {
2128 		ld_tgt_id = MEGASAS_TARGET_ID(sdev);
2129 		instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE;
2130 		if (megasas_dbg_lvl & LD_PD_DEBUG)
2131 			sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id);
2132 	}
2133 
2134 	sdev->hostdata = mr_device_priv_data;
2135 
2136 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2137 		   instance->r1_ldio_hint_default);
2138 	return 0;
2139 }
2140 
2141 static void megasas_slave_destroy(struct scsi_device *sdev)
2142 {
2143 	u16 ld_tgt_id;
2144 	struct megasas_instance *instance;
2145 
2146 	instance = megasas_lookup_instance(sdev->host->host_no);
2147 
2148 	if (MEGASAS_IS_LOGICAL(sdev)) {
2149 		ld_tgt_id = MEGASAS_TARGET_ID(sdev);
2150 		instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
2151 		if (megasas_dbg_lvl & LD_PD_DEBUG)
2152 			sdev_printk(KERN_INFO, sdev,
2153 				    "LD target ID %d removed from OS stack\n", ld_tgt_id);
2154 	}
2155 
2156 	kfree(sdev->hostdata);
2157 	sdev->hostdata = NULL;
2158 }
2159 
2160 /*
2161 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
2162 *                                       the adapter has been killed
2163 * @instance:				Adapter soft state
2164 *
2165 */
2166 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2167 {
2168 	int i;
2169 	struct megasas_cmd *cmd_mfi;
2170 	struct megasas_cmd_fusion *cmd_fusion;
2171 	struct fusion_context *fusion = instance->ctrl_context;
2172 
2173 	/* Find all outstanding ioctls */
2174 	if (fusion) {
2175 		for (i = 0; i < instance->max_fw_cmds; i++) {
2176 			cmd_fusion = fusion->cmd_list[i];
2177 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2178 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2179 				if (cmd_mfi->sync_cmd &&
2180 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2181 					cmd_mfi->frame->hdr.cmd_status =
2182 							MFI_STAT_WRONG_STATE;
2183 					megasas_complete_cmd(instance,
2184 							     cmd_mfi, DID_OK);
2185 				}
2186 			}
2187 		}
2188 	} else {
2189 		for (i = 0; i < instance->max_fw_cmds; i++) {
2190 			cmd_mfi = instance->cmd_list[i];
2191 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2192 				MFI_CMD_ABORT)
2193 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2194 		}
2195 	}
2196 }
2197 
2198 
2199 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2200 {
2201 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2202 		dev_warn(&instance->pdev->dev,
2203 			 "Adapter already dead, skipping kill HBA\n");
2204 		return;
2205 	}
2206 
2207 	/* Set critical error to block I/O & ioctls in case caller didn't */
2208 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2209 	/* Wait 1 second to ensure IOs or ioctls being built have been posted */
2210 	msleep(1000);
2211 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2212 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2213 		(instance->adapter_type != MFI_SERIES)) {
2214 		if (!instance->requestorId) {
2215 			writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2216 			/* Flush */
2217 			readl(&instance->reg_set->doorbell);
2218 		}
2219 		if (instance->requestorId && instance->peerIsPresent)
2220 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2221 	} else {
2222 		writel(MFI_STOP_ADP,
2223 			&instance->reg_set->inbound_doorbell);
2224 	}
2225 	/* Complete outstanding ioctls when adapter is killed */
2226 	megasas_complete_outstanding_ioctls(instance);
2227 }
2228 
2229 /**
2230  * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2231  *					restored to max value
2232  * @instance:			Adapter soft state
2233  *
2234  */
2235 void
2236 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2237 {
2238 	unsigned long flags;
2239 
2240 	if (instance->flag & MEGASAS_FW_BUSY
2241 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2242 	    && atomic_read(&instance->fw_outstanding) <
2243 	    instance->throttlequeuedepth + 1) {
2244 
2245 		spin_lock_irqsave(instance->host->host_lock, flags);
2246 		instance->flag &= ~MEGASAS_FW_BUSY;
2247 
2248 		instance->host->can_queue = instance->cur_can_queue;
2249 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2250 	}
2251 }
2252 
2253 /**
2254  * megasas_complete_cmd_dpc	 -	Completes commands from the MFI reply queue
2255  * @instance_addr:			Address of adapter soft state
2256  *
2257  * Tasklet to complete cmds
2258  */
2259 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2260 {
2261 	u32 producer;
2262 	u32 consumer;
2263 	u32 context;
2264 	struct megasas_cmd *cmd;
2265 	struct megasas_instance *instance =
2266 				(struct megasas_instance *)instance_addr;
2267 	unsigned long flags;
2268 
2269 	/* If we have already declared the adapter dead, do not complete cmds */
2270 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2271 		return;
2272 
2273 	spin_lock_irqsave(&instance->completion_lock, flags);
2274 
2275 	producer = le32_to_cpu(*instance->producer);
2276 	consumer = le32_to_cpu(*instance->consumer);
2277 
2278 	while (consumer != producer) {
2279 		context = le32_to_cpu(instance->reply_queue[consumer]);
2280 		if (context >= instance->max_fw_cmds) {
2281 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2282 				context);
2283 			BUG();
2284 		}
2285 
2286 		cmd = instance->cmd_list[context];
2287 
2288 		megasas_complete_cmd(instance, cmd, DID_OK);
2289 
2290 		consumer++;
2291 		if (consumer == (instance->max_fw_cmds + 1)) {
2292 			consumer = 0;
2293 		}
2294 	}
2295 
2296 	*instance->consumer = cpu_to_le32(producer);
2297 
2298 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2299 
2300 	/*
2301 	 * Check if we can restore can_queue
2302 	 */
2303 	megasas_check_and_restore_queue_depth(instance);
2304 }
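/*
 * Illustrative walk-through of the reply ring handled above, assuming
 * max_fw_cmds = 4 (a 5-slot ring): with *consumer = 3 and *producer = 1,
 * the loop completes the commands whose contexts sit in slots 3 and 4,
 * wraps the consumer index back to 0, completes slot 0, stops on reaching
 * the producer, and finally writes the producer value back to *consumer.
 */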
2305 
2306 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2307 
2308 /**
2309  * megasas_start_timer - Initializes sriov heartbeat timer object
2310  * @instance:		Adapter soft state
2311  *
2312  */
2313 void megasas_start_timer(struct megasas_instance *instance)
2314 {
2315 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2316 
2317 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2318 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2319 	add_timer(timer);
2320 }
2321 
2322 static void
2323 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2324 
2325 static void
2326 process_fw_state_change_wq(struct work_struct *work);
2327 
2328 static void megasas_do_ocr(struct megasas_instance *instance)
2329 {
2330 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2331 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2332 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2333 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2334 	}
2335 	instance->instancet->disable_intr(instance);
2336 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2337 	instance->issuepend_done = 0;
2338 
2339 	atomic_set(&instance->fw_outstanding, 0);
2340 	megasas_internal_reset_defer_cmds(instance);
2341 	process_fw_state_change_wq(&instance->work_init);
2342 }
2343 
2344 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2345 					    int initial)
2346 {
2347 	struct megasas_cmd *cmd;
2348 	struct megasas_dcmd_frame *dcmd;
2349 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2350 	dma_addr_t new_affiliation_111_h;
2351 	int ld, retval = 0;
2352 	u8 thisVf;
2353 
2354 	cmd = megasas_get_cmd(instance);
2355 
2356 	if (!cmd) {
2357 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
2358 		       "Failed to get cmd for scsi%d\n",
2359 			instance->host->host_no);
2360 		return -ENOMEM;
2361 	}
2362 
2363 	dcmd = &cmd->frame->dcmd;
2364 
2365 	if (!instance->vf_affiliation_111) {
2366 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2367 		       "affiliation for scsi%d\n", instance->host->host_no);
2368 		megasas_return_cmd(instance, cmd);
2369 		return -ENOMEM;
2370 	}
2371 
2372 	if (initial)
2373 			memset(instance->vf_affiliation_111, 0,
2374 			       sizeof(struct MR_LD_VF_AFFILIATION_111));
2375 	else {
2376 		new_affiliation_111 =
2377 			dma_alloc_coherent(&instance->pdev->dev,
2378 					   sizeof(struct MR_LD_VF_AFFILIATION_111),
2379 					   &new_affiliation_111_h, GFP_KERNEL);
2380 		if (!new_affiliation_111) {
2381 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2382 			       "memory for new affiliation for scsi%d\n",
2383 			       instance->host->host_no);
2384 			megasas_return_cmd(instance, cmd);
2385 			return -ENOMEM;
2386 		}
2387 	}
2388 
2389 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2390 
2391 	dcmd->cmd = MFI_CMD_DCMD;
2392 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2393 	dcmd->sge_count = 1;
2394 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2395 	dcmd->timeout = 0;
2396 	dcmd->pad_0 = 0;
2397 	dcmd->data_xfer_len =
2398 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2399 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2400 
2401 	if (initial)
2402 		dcmd->sgl.sge32[0].phys_addr =
2403 			cpu_to_le32(instance->vf_affiliation_111_h);
2404 	else
2405 		dcmd->sgl.sge32[0].phys_addr =
2406 			cpu_to_le32(new_affiliation_111_h);
2407 
2408 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2409 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2410 
2411 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2412 	       "scsi%d\n", instance->host->host_no);
2413 
2414 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2415 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2416 		       " failed with status 0x%x for scsi%d\n",
2417 		       dcmd->cmd_status, instance->host->host_no);
2418 		retval = 1; /* Do a scan if we couldn't get affiliation */
2419 		goto out;
2420 	}
2421 
2422 	if (!initial) {
2423 		thisVf = new_affiliation_111->thisVf;
2424 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2425 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2426 			    new_affiliation_111->map[ld].policy[thisVf]) {
2427 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2428 				       "Got new LD/VF affiliation for scsi%d\n",
2429 				       instance->host->host_no);
2430 				memcpy(instance->vf_affiliation_111,
2431 				       new_affiliation_111,
2432 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2433 				retval = 1;
2434 				goto out;
2435 			}
2436 	}
2437 out:
2438 	if (new_affiliation_111) {
2439 		dma_free_coherent(&instance->pdev->dev,
2440 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2441 				    new_affiliation_111,
2442 				    new_affiliation_111_h);
2443 	}
2444 
2445 	megasas_return_cmd(instance, cmd);
2446 
2447 	return retval;
2448 }
2449 
2450 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2451 					    int initial)
2452 {
2453 	struct megasas_cmd *cmd;
2454 	struct megasas_dcmd_frame *dcmd;
2455 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2456 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2457 	dma_addr_t new_affiliation_h;
2458 	int i, j, retval = 0, found = 0, doscan = 0;
2459 	u8 thisVf;
2460 
2461 	cmd = megasas_get_cmd(instance);
2462 
2463 	if (!cmd) {
2464 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2465 		       "Failed to get cmd for scsi%d\n",
2466 		       instance->host->host_no);
2467 		return -ENOMEM;
2468 	}
2469 
2470 	dcmd = &cmd->frame->dcmd;
2471 
2472 	if (!instance->vf_affiliation) {
2473 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2474 		       "affiliation for scsi%d\n", instance->host->host_no);
2475 		megasas_return_cmd(instance, cmd);
2476 		return -ENOMEM;
2477 	}
2478 
2479 	if (initial)
2480 		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2481 		       sizeof(struct MR_LD_VF_AFFILIATION));
2482 	else {
2483 		new_affiliation =
2484 			dma_alloc_coherent(&instance->pdev->dev,
2485 					   (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2486 					   &new_affiliation_h, GFP_KERNEL);
2487 		if (!new_affiliation) {
2488 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2489 			       "memory for new affiliation for scsi%d\n",
2490 			       instance->host->host_no);
2491 			megasas_return_cmd(instance, cmd);
2492 			return -ENOMEM;
2493 		}
2494 	}
2495 
2496 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2497 
2498 	dcmd->cmd = MFI_CMD_DCMD;
2499 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2500 	dcmd->sge_count = 1;
2501 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2502 	dcmd->timeout = 0;
2503 	dcmd->pad_0 = 0;
2504 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2505 		sizeof(struct MR_LD_VF_AFFILIATION));
2506 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2507 
2508 	if (initial)
2509 		dcmd->sgl.sge32[0].phys_addr =
2510 			cpu_to_le32(instance->vf_affiliation_h);
2511 	else
2512 		dcmd->sgl.sge32[0].phys_addr =
2513 			cpu_to_le32(new_affiliation_h);
2514 
2515 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2516 		sizeof(struct MR_LD_VF_AFFILIATION));
2517 
2518 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2519 	       "scsi%d\n", instance->host->host_no);
2520 
2521 
2522 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2523 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2524 		       " failed with status 0x%x for scsi%d\n",
2525 		       dcmd->cmd_status, instance->host->host_no);
2526 		retval = 1; /* Do a scan if we couldn't get affiliation */
2527 		goto out;
2528 	}
2529 
2530 	if (!initial) {
2531 		if (!new_affiliation->ldCount) {
2532 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2533 			       "affiliation for passive path for scsi%d\n",
2534 			       instance->host->host_no);
2535 			retval = 1;
2536 			goto out;
2537 		}
2538 		newmap = new_affiliation->map;
2539 		savedmap = instance->vf_affiliation->map;
2540 		thisVf = new_affiliation->thisVf;
2541 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2542 			found = 0;
2543 			for (j = 0; j < instance->vf_affiliation->ldCount;
2544 			     j++) {
2545 				if (newmap->ref.targetId ==
2546 				    savedmap->ref.targetId) {
2547 					found = 1;
2548 					if (newmap->policy[thisVf] !=
2549 					    savedmap->policy[thisVf]) {
2550 						doscan = 1;
2551 						goto out;
2552 					}
2553 				}
2554 				savedmap = (struct MR_LD_VF_MAP *)
2555 					((unsigned char *)savedmap +
2556 					 savedmap->size);
2557 			}
2558 			if (!found && newmap->policy[thisVf] !=
2559 			    MR_LD_ACCESS_HIDDEN) {
2560 				doscan = 1;
2561 				goto out;
2562 			}
2563 			newmap = (struct MR_LD_VF_MAP *)
2564 				((unsigned char *)newmap + newmap->size);
2565 		}
2566 
2567 		newmap = new_affiliation->map;
2568 		savedmap = instance->vf_affiliation->map;
2569 
2570 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2571 			found = 0;
2572 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2573 				if (savedmap->ref.targetId ==
2574 				    newmap->ref.targetId) {
2575 					found = 1;
2576 					if (savedmap->policy[thisVf] !=
2577 					    newmap->policy[thisVf]) {
2578 						doscan = 1;
2579 						goto out;
2580 					}
2581 				}
2582 				newmap = (struct MR_LD_VF_MAP *)
2583 					((unsigned char *)newmap +
2584 					 newmap->size);
2585 			}
2586 			if (!found && savedmap->policy[thisVf] !=
2587 			    MR_LD_ACCESS_HIDDEN) {
2588 				doscan = 1;
2589 				goto out;
2590 			}
2591 			savedmap = (struct MR_LD_VF_MAP *)
2592 				((unsigned char *)savedmap +
2593 				 savedmap->size);
2594 		}
2595 	}
2596 out:
2597 	if (doscan) {
2598 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2599 		       "affiliation for scsi%d\n", instance->host->host_no);
2600 		memcpy(instance->vf_affiliation, new_affiliation,
2601 		       new_affiliation->size);
2602 		retval = 1;
2603 	}
2604 
2605 	if (new_affiliation)
2606 		dma_free_coherent(&instance->pdev->dev,
2607 				    (MAX_LOGICAL_DRIVES + 1) *
2608 				    sizeof(struct MR_LD_VF_AFFILIATION),
2609 				    new_affiliation, new_affiliation_h);
2610 	megasas_return_cmd(instance, cmd);
2611 
2612 	return retval;
2613 }
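/*
 * Note on the comparison above: doscan is set (copying the new map and
 * requesting a rescan) when an LD present in both the saved and the new
 * affiliation changes its policy for thisVf, or when an LD appears in only
 * one of the two maps with a policy other than MR_LD_ACCESS_HIDDEN.
 */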
2614 
2615 /* This function will get the current SR-IOV LD/VF affiliation */
2616 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2617 	int initial)
2618 {
2619 	int retval;
2620 
2621 	if (instance->PlasmaFW111)
2622 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2623 	else
2624 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2625 	return retval;
2626 }
2627 
2628 /* This function will tell FW to start the SR-IOV heartbeat */
2629 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2630 					 int initial)
2631 {
2632 	struct megasas_cmd *cmd;
2633 	struct megasas_dcmd_frame *dcmd;
2634 	int retval = 0;
2635 
2636 	cmd = megasas_get_cmd(instance);
2637 
2638 	if (!cmd) {
2639 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2640 		       "Failed to get cmd for scsi%d\n",
2641 		       instance->host->host_no);
2642 		return -ENOMEM;
2643 	}
2644 
2645 	dcmd = &cmd->frame->dcmd;
2646 
2647 	if (initial) {
2648 		instance->hb_host_mem =
2649 			dma_alloc_coherent(&instance->pdev->dev,
2650 					   sizeof(struct MR_CTRL_HB_HOST_MEM),
2651 					   &instance->hb_host_mem_h,
2652 					   GFP_KERNEL);
2653 		if (!instance->hb_host_mem) {
2654 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2655 			       " memory for heartbeat host memory for scsi%d\n",
2656 			       instance->host->host_no);
2657 			retval = -ENOMEM;
2658 			goto out;
2659 		}
2660 	}
2661 
2662 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2663 
2664 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2665 	dcmd->cmd = MFI_CMD_DCMD;
2666 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2667 	dcmd->sge_count = 1;
2668 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2669 	dcmd->timeout = 0;
2670 	dcmd->pad_0 = 0;
2671 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2672 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2673 
2674 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2675 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2676 
2677 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2678 	       instance->host->host_no);
2679 
2680 	if ((instance->adapter_type != MFI_SERIES) &&
2681 	    !instance->mask_interrupts)
2682 		retval = megasas_issue_blocked_cmd(instance, cmd,
2683 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2684 	else
2685 		retval = megasas_issue_polled(instance, cmd);
2686 
2687 	if (retval) {
2688 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2689 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2690 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2691 			"timed out" : "failed", instance->host->host_no);
2692 		retval = 1;
2693 	}
2694 
2695 out:
2696 	megasas_return_cmd(instance, cmd);
2697 
2698 	return retval;
2699 }
2700 
2701 /* Handler for SR-IOV heartbeat */
2702 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2703 {
2704 	struct megasas_instance *instance =
2705 		from_timer(instance, t, sriov_heartbeat_timer);
2706 
2707 	if (instance->hb_host_mem->HB.fwCounter !=
2708 	    instance->hb_host_mem->HB.driverCounter) {
2709 		instance->hb_host_mem->HB.driverCounter =
2710 			instance->hb_host_mem->HB.fwCounter;
2711 		mod_timer(&instance->sriov_heartbeat_timer,
2712 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2713 	} else {
2714 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2715 		       "completed for scsi%d\n", instance->host->host_no);
2716 		schedule_work(&instance->work_init);
2717 	}
2718 }
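/*
 * Heartbeat mechanics, as implemented above: the FW increments fwCounter in
 * the shared MR_CTRL_HB_HOST_MEM region; each time the timer fires, the
 * handler copies fwCounter into driverCounter and re-arms the timer for
 * another MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF. If fwCounter has not moved
 * since the previous interval, the FW is presumed unresponsive and
 * work_init is scheduled to handle the fault.
 */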
2719 
2720 /**
2721  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2722  * @instance:				Adapter soft state
2723  *
2724  * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2725  * complete all its outstanding commands. Returns error if one or more IOs
2726  * are pending after this time period. It also marks the controller dead.
2727  */
2728 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2729 {
2730 	int i, sl, outstanding;
2731 	u32 reset_index;
2732 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2733 	unsigned long flags;
2734 	struct list_head clist_local;
2735 	struct megasas_cmd *reset_cmd;
2736 	u32 fw_state;
2737 
2738 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2739 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2740 		__func__, __LINE__);
2741 		return FAILED;
2742 	}
2743 
2744 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2745 
2746 		INIT_LIST_HEAD(&clist_local);
2747 		spin_lock_irqsave(&instance->hba_lock, flags);
2748 		list_splice_init(&instance->internal_reset_pending_q,
2749 				&clist_local);
2750 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2751 
2752 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2753 		for (i = 0; i < wait_time; i++) {
2754 			msleep(1000);
2755 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2756 				break;
2757 		}
2758 
2759 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2760 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2761 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2762 			return FAILED;
2763 		}
2764 
2765 		reset_index = 0;
2766 		while (!list_empty(&clist_local)) {
2767 			reset_cmd = list_entry((&clist_local)->next,
2768 						struct megasas_cmd, list);
2769 			list_del_init(&reset_cmd->list);
2770 			if (reset_cmd->scmd) {
2771 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2772 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2773 					reset_index, reset_cmd,
2774 					reset_cmd->scmd->cmnd[0]);
2775 
2776 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2777 				megasas_return_cmd(instance, reset_cmd);
2778 			} else if (reset_cmd->sync_cmd) {
2779 				dev_notice(&instance->pdev->dev, "%p synch cmds "
2780 						"reset queue\n",
2781 						reset_cmd);
2782 
2783 				reset_cmd->cmd_status_drv = DCMD_INIT;
2784 				instance->instancet->fire_cmd(instance,
2785 						reset_cmd->frame_phys_addr,
2786 						0, instance->reg_set);
2787 			} else {
2788 				dev_notice(&instance->pdev->dev, "%p unexpected "
2789 					"cmds lst\n",
2790 					reset_cmd);
2791 			}
2792 			reset_index++;
2793 		}
2794 
2795 		return SUCCESS;
2796 	}
2797 
2798 	for (i = 0; i < resetwaittime; i++) {
2799 		outstanding = atomic_read(&instance->fw_outstanding);
2800 
2801 		if (!outstanding)
2802 			break;
2803 
2804 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2805 			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2806 			       "commands to complete\n",i,outstanding);
2807 			/*
2808 			 * Call the cmd completion routine. Cmds are
2809 			 * completed directly without depending on the isr.
2810 			 */
2811 			megasas_complete_cmd_dpc((unsigned long)instance);
2812 		}
2813 
2814 		msleep(1000);
2815 	}
2816 
2817 	i = 0;
2818 	outstanding = atomic_read(&instance->fw_outstanding);
2819 	fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2820 
2821 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2822 		goto no_outstanding;
2823 
2824 	if (instance->disableOnlineCtrlReset)
2825 		goto kill_hba_and_failed;
2826 	do {
2827 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2828 			dev_info(&instance->pdev->dev,
2829 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2830 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2831 			if (i == 3)
2832 				goto kill_hba_and_failed;
2833 			megasas_do_ocr(instance);
2834 
2835 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2836 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2837 				__func__, __LINE__);
2838 				return FAILED;
2839 			}
2840 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2841 				__func__, __LINE__);
2842 
2843 			for (sl = 0; sl < 10; sl++)
2844 				msleep(500);
2845 
2846 			outstanding = atomic_read(&instance->fw_outstanding);
2847 
2848 			fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2849 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2850 				goto no_outstanding;
2851 		}
2852 		i++;
2853 	} while (i <= 3);
2854 
2855 no_outstanding:
2856 
2857 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2858 		__func__, __LINE__);
2859 	return SUCCESS;
2860 
2861 kill_hba_and_failed:
2862 
2863 	/* Reset not supported, kill adapter */
2864 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2865 		" disableOnlineCtrlReset %d fw_outstanding %d \n",
2866 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2867 		atomic_read(&instance->fw_outstanding));
2868 	megasas_dump_pending_frames(instance);
2869 	megaraid_sas_kill_hba(instance);
2870 
2871 	return FAILED;
2872 }
2873 
2874 /**
2875  * megasas_generic_reset -	Generic reset routine
2876  * @scmd:			Mid-layer SCSI command
2877  *
2878  * This routine implements a generic reset handler for device, bus and host
2879  * reset requests. Device, bus and host specific reset handlers can use this
2880  * function after they do their specific tasks.
2881  */
2882 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2883 {
2884 	int ret_val;
2885 	struct megasas_instance *instance;
2886 
2887 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2888 
2889 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2890 		 scmd->cmnd[0], scmd->retries);
2891 
2892 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2893 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2894 		return FAILED;
2895 	}
2896 
2897 	ret_val = megasas_wait_for_outstanding(instance);
2898 	if (ret_val == SUCCESS)
2899 		dev_notice(&instance->pdev->dev, "reset successful\n");
2900 	else
2901 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2902 
2903 	return ret_val;
2904 }
2905 
2906 /**
2907  * megasas_reset_timer - quiesce the adapter if required
2908  * @scmd:		scsi cmnd
2909  *
2910  * Sets the FW busy flag and reduces the host->can_queue if the
2911  * cmd has not been completed within the timeout period.
2912  */
2913 static enum
2914 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2915 {
2916 	struct megasas_instance *instance;
2917 	unsigned long flags;
2918 
2919 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2920 				(scmd_timeout * 2) * HZ)) {
2921 		return BLK_EH_DONE;
2922 	}
2923 
2924 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2925 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
2926 		/* Assume FW is busy and throttle IO by reducing can_queue */
2927 		spin_lock_irqsave(instance->host->host_lock, flags);
2928 
2929 		instance->host->can_queue = instance->throttlequeuedepth;
2930 		instance->last_time = jiffies;
2931 		instance->flag |= MEGASAS_FW_BUSY;
2932 
2933 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2934 	}
2935 	return BLK_EH_RESET_TIMER;
2936 }
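/*
 * Worked example (assumed scmd_timeout value, not driver code): with
 * scmd_timeout = 90, a command older than 2 * 90 = 180 seconds makes
 * megasas_reset_timer() return BLK_EH_DONE so normal timeout handling
 * proceeds; a younger command instead drops host->can_queue to
 * throttlequeuedepth, sets MEGASAS_FW_BUSY and returns BLK_EH_RESET_TIMER.
 * The throttle is undone by megasas_check_and_restore_queue_depth() once
 * 5 seconds have passed and fewer than throttlequeuedepth + 1 commands
 * remain outstanding.
 */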
2937 
2938 /**
2939  * megasas_dump -	This function will print a hexdump of the provided buffer.
2940  * @buf:		Buffer to be dumped
2941  * @sz:		Size in bytes
2942  * @format:		Number of 32-bit words to dump per line, e.g.
2943  *			format=n causes 'n' 32-bit words to be dumped in a
2944  *			single line.
2945  */
2946 inline void
2947 megasas_dump(void *buf, int sz, int format)
2948 {
2949 	int i;
2950 	__le32 *buf_loc = (__le32 *)buf;
2951 
2952 	for (i = 0; i < (sz / sizeof(__le32)); i++) {
2953 		if ((i % format) == 0) {
2954 			if (i != 0)
2955 				printk(KERN_CONT "\n");
2956 			printk(KERN_CONT "%08x: ", (i * 4));
2957 		}
2958 		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2959 	}
2960 	printk(KERN_CONT "\n");
2961 }
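/*
 * Illustrative example (not driver code): megasas_dump(buf, 32, 4) prints
 * eight 32-bit words, four per line, producing two lines prefixed with the
 * byte offsets "00000000: " and "00000010: ".
 */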
2962 
2963 /**
2964  * megasas_dump_reg_set -	This function will print a hexdump of the register set
2965  * @reg_set:	Register set to be dumped
2966  */
2967 inline void
2968 megasas_dump_reg_set(void __iomem *reg_set)
2969 {
2970 	unsigned int i, sz = 256;
2971 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2972 
2973 	for (i = 0; i < (sz / sizeof(u32)); i++)
2974 		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2975 }
2976 
2977 /**
2978  * megasas_dump_fusion_io -	This function will print key details
2979  *				of SCSI IO
2980  * @scmd:			SCSI command pointer of SCSI IO
2981  */
2982 void
2983 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2984 {
2985 	struct megasas_cmd_fusion *cmd;
2986 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2987 	struct megasas_instance *instance;
2988 
2989 	cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2990 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2991 
2992 	scmd_printk(KERN_INFO, scmd,
2993 		    "scmd: (0x%p)  retries: 0x%x  allowed: 0x%x\n",
2994 		    scmd, scmd->retries, scmd->allowed);
2995 	scsi_print_command(scmd);
2996 
2997 	if (cmd) {
2998 		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2999 		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
3000 		scmd_printk(KERN_INFO, scmd,
3001 			    "RequestFlags:0x%x  MSIxIndex:0x%x  SMID:0x%x  LMID:0x%x  DevHandle:0x%x\n",
3002 			    req_desc->SCSIIO.RequestFlags,
3003 			    req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
3004 			    req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
3005 
3006 		printk(KERN_INFO "IO request frame:\n");
3007 		megasas_dump(cmd->io_request,
3008 			     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
3009 		printk(KERN_INFO "Chain frame:\n");
3010 		megasas_dump(cmd->sg_frame,
3011 			     instance->max_chain_frame_sz, 8);
3012 	}
3013 
3014 }
3015 
3016 /*
3017  * megasas_dump_sys_regs - This function will dump system registers through
3018  *			    sysfs.
3019  * @reg_set:		    Pointer to System register set.
3020  * @buf:		    Buffer to which output is to be written.
3021  * @return:		    Number of bytes written to buffer.
3022  */
3023 static inline ssize_t
3024 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
3025 {
3026 	unsigned int i, sz = 256;
3027 	int bytes_wrote = 0;
3028 	char *loc = (char *)buf;
3029 	u32 __iomem *reg = (u32 __iomem *)reg_set;
3030 
3031 	for (i = 0; i < sz / sizeof(u32); i++) {
3032 		bytes_wrote += scnprintf(loc + bytes_wrote,
3033 					 PAGE_SIZE - bytes_wrote,
3034 					 "%08x: %08x\n", (i * 4),
3035 					 readl(&reg[i]));
3036 	}
3037 	return bytes_wrote;
3038 }
3039 
3040 /**
3041  * megasas_reset_bus_host -	Bus & host reset handler entry point
3042  * @scmd:			Mid-layer SCSI command
3043  */
3044 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3045 {
3046 	int ret;
3047 	struct megasas_instance *instance;
3048 
3049 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3050 
3051 	scmd_printk(KERN_INFO, scmd,
3052 		"OCR is requested due to IO timeout!!\n");
3053 
3054 	scmd_printk(KERN_INFO, scmd,
3055 		"SCSI host state: %d  SCSI host busy: %d  FW outstanding: %d\n",
3056 		scmd->device->host->shost_state,
3057 		scsi_host_busy(scmd->device->host),
3058 		atomic_read(&instance->fw_outstanding));
3059 	/*
3060 	 * First wait for all commands to complete
3061 	 */
3062 	if (instance->adapter_type == MFI_SERIES) {
3063 		ret = megasas_generic_reset(scmd);
3064 	} else {
3065 		megasas_dump_fusion_io(scmd);
3066 		ret = megasas_reset_fusion(scmd->device->host,
3067 				SCSIIO_TIMEOUT_OCR);
3068 	}
3069 
3070 	return ret;
3071 }
3072 
3073 /**
3074  * megasas_task_abort - Issues task abort request to firmware
3075  *			(supported only for fusion adapters)
3076  * @scmd:		SCSI command pointer
3077  */
3078 static int megasas_task_abort(struct scsi_cmnd *scmd)
3079 {
3080 	int ret;
3081 	struct megasas_instance *instance;
3082 
3083 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3084 
3085 	if (instance->adapter_type != MFI_SERIES)
3086 		ret = megasas_task_abort_fusion(scmd);
3087 	else {
3088 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3089 		ret = FAILED;
3090 	}
3091 
3092 	return ret;
3093 }
3094 
3095 /**
3096  * megasas_reset_target:  Issues target reset request to firmware
3097  *                        (supported only for fusion adapters)
3098  * @scmd:                 SCSI command pointer
3099  */
3100 static int megasas_reset_target(struct scsi_cmnd *scmd)
3101 {
3102 	int ret;
3103 	struct megasas_instance *instance;
3104 
3105 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3106 
3107 	if (instance->adapter_type != MFI_SERIES)
3108 		ret = megasas_reset_target_fusion(scmd);
3109 	else {
3110 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3111 		ret = FAILED;
3112 	}
3113 
3114 	return ret;
3115 }
3116 
3117 /**
3118  * megasas_bios_param - Returns disk geometry for a disk
3119  * @sdev:		device handle
3120  * @bdev:		block device
3121  * @capacity:		drive capacity
3122  * @geom:		geometry parameters
3123  */
3124 static int
3125 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3126 		 sector_t capacity, int geom[])
3127 {
3128 	int heads;
3129 	int sectors;
3130 	sector_t cylinders;
3131 	unsigned long tmp;
3132 
3133 	/* Default heads (64) & sectors (32) */
3134 	heads = 64;
3135 	sectors = 32;
3136 
3137 	tmp = heads * sectors;
3138 	cylinders = capacity;
3139 
3140 	sector_div(cylinders, tmp);
3141 
3142 	/*
3143 	 * Handle extended translation size for logical drives > 1Gb
3144 	 */
3145 
3146 	if (capacity >= 0x200000) {
3147 		heads = 255;
3148 		sectors = 63;
3149 		tmp = heads*sectors;
3150 		cylinders = capacity;
3151 		sector_div(cylinders, tmp);
3152 	}
3153 
3154 	geom[0] = heads;
3155 	geom[1] = sectors;
3156 	geom[2] = cylinders;
3157 
3158 	return 0;
3159 }
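/*
 * Worked example (not driver code): a capacity below 0x200000 sectors uses
 * the 64-head/32-sector geometry (2048 sectors per cylinder); at 0x200000
 * sectors (1 GiB with 512-byte sectors) and above, the 255/63 translation
 * applies, e.g. 0x200000 sectors gives 2097152 / (255 * 63) = 130 cylinders.
 */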
3160 
3161 static int megasas_map_queues(struct Scsi_Host *shost)
3162 {
3163 	struct megasas_instance *instance;
3164 
3165 	instance = (struct megasas_instance *)shost->hostdata;
3166 
3167 	if (shost->nr_hw_queues == 1)
3168 		return 0;
3169 
3170 	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
3171 			instance->pdev, instance->low_latency_index_start);
3172 }
3173 
3174 static void megasas_aen_polling(struct work_struct *work);
3175 
3176 /**
3177  * megasas_service_aen -	Processes an event notification
3178  * @instance:			Adapter soft state
3179  * @cmd:			AEN command completed by the ISR
3180  *
3181  * For AEN, driver sends a command down to FW that is held by the FW till an
3182  * event occurs. When an event of interest occurs, FW completes the command
3183  * that it was previously holding.
3184  *
3185  * This routine sends a SIGIO signal to processes that have registered with the
3186  * driver for AEN.
3187  */
3188 static void
3189 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3190 {
3191 	unsigned long flags;
3192 
3193 	/*
3194 	 * Don't signal the app if this is just the abort of a previously registered AEN
3195 	 */
3196 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
3197 		spin_lock_irqsave(&poll_aen_lock, flags);
3198 		megasas_poll_wait_aen = 1;
3199 		spin_unlock_irqrestore(&poll_aen_lock, flags);
3200 		wake_up(&megasas_poll_wait);
3201 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3202 	}
3203 	else
3204 		cmd->abort_aen = 0;
3205 
3206 	instance->aen_cmd = NULL;
3207 
3208 	megasas_return_cmd(instance, cmd);
3209 
3210 	if ((instance->unload == 0) &&
3211 		((instance->issuepend_done == 1))) {
3212 		struct megasas_aen_event *ev;
3213 
3214 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3215 		if (!ev) {
3216 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3217 		} else {
3218 			ev->instance = instance;
3219 			instance->ev = ev;
3220 			INIT_DELAYED_WORK(&ev->hotplug_work,
3221 					  megasas_aen_polling);
3222 			schedule_delayed_work(&ev->hotplug_work, 0);
3223 		}
3224 	}
3225 }
3226 
3227 static ssize_t
3228 fw_crash_buffer_store(struct device *cdev,
3229 	struct device_attribute *attr, const char *buf, size_t count)
3230 {
3231 	struct Scsi_Host *shost = class_to_shost(cdev);
3232 	struct megasas_instance *instance =
3233 		(struct megasas_instance *) shost->hostdata;
3234 	int val = 0;
3235 	unsigned long flags;
3236 
3237 	if (kstrtoint(buf, 0, &val) != 0)
3238 		return -EINVAL;
3239 
3240 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3241 	instance->fw_crash_buffer_offset = val;
3242 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3243 	return strlen(buf);
3244 }
3245 
3246 static ssize_t
3247 fw_crash_buffer_show(struct device *cdev,
3248 	struct device_attribute *attr, char *buf)
3249 {
3250 	struct Scsi_Host *shost = class_to_shost(cdev);
3251 	struct megasas_instance *instance =
3252 		(struct megasas_instance *) shost->hostdata;
3253 	u32 size;
3254 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3255 	unsigned long chunk_left_bytes;
3256 	unsigned long src_addr;
3257 	unsigned long flags;
3258 	u32 buff_offset;
3259 
3260 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3261 	buff_offset = instance->fw_crash_buffer_offset;
3262 	if (!instance->crash_dump_buf &&
3263 		!((instance->fw_crash_state == AVAILABLE) ||
3264 		(instance->fw_crash_state == COPYING))) {
3265 		dev_err(&instance->pdev->dev,
3266 			"Firmware crash dump is not available\n");
3267 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3268 		return -EINVAL;
3269 	}
3270 
3271 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3272 		dev_err(&instance->pdev->dev,
3273 			"Firmware crash dump offset is out of range\n");
3274 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3275 		return 0;
3276 	}
3277 
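	/*
	 * The statements below cap each sysfs read.  Worked example (1 MB DMA
	 * chunks assumed for illustration): at offset 0x0ff000,
	 * chunk_left_bytes = 0x1000, and the final clamp to PAGE_SIZE - 1
	 * keeps a single read within one DMA chunk and one page of output.
	 */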
3278 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3279 	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3280 	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3281 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3282 
3283 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3284 		(buff_offset % dmachunk);
3285 	memcpy(buf, (void *)src_addr, size);
3286 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3287 
3288 	return size;
3289 }
3290 
3291 static ssize_t
3292 fw_crash_buffer_size_show(struct device *cdev,
3293 	struct device_attribute *attr, char *buf)
3294 {
3295 	struct Scsi_Host *shost = class_to_shost(cdev);
3296 	struct megasas_instance *instance =
3297 		(struct megasas_instance *) shost->hostdata;
3298 
3299 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3300 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3301 }
3302 
3303 static ssize_t
3304 fw_crash_state_store(struct device *cdev,
3305 	struct device_attribute *attr, const char *buf, size_t count)
3306 {
3307 	struct Scsi_Host *shost = class_to_shost(cdev);
3308 	struct megasas_instance *instance =
3309 		(struct megasas_instance *) shost->hostdata;
3310 	int val = 0;
3311 	unsigned long flags;
3312 
3313 	if (kstrtoint(buf, 0, &val) != 0)
3314 		return -EINVAL;
3315 
3316 	if ((val <= AVAILABLE || val > COPY_ERROR)) {
3317 		dev_err(&instance->pdev->dev, "application updates invalid "
3318 			"firmware crash state\n");
3319 		return -EINVAL;
3320 	}
3321 
3322 	instance->fw_crash_state = val;
3323 
3324 	if ((val == COPIED) || (val == COPY_ERROR)) {
3325 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3326 		megasas_free_host_crash_buffer(instance);
3327 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3328 		if (val == COPY_ERROR)
3329 			dev_info(&instance->pdev->dev, "application failed to "
3330 				"copy Firmware crash dump\n");
3331 		else
3332 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3333 				"copied successfully\n");
3334 	}
3335 	return strlen(buf);
3336 }
3337 
3338 static ssize_t
3339 fw_crash_state_show(struct device *cdev,
3340 	struct device_attribute *attr, char *buf)
3341 {
3342 	struct Scsi_Host *shost = class_to_shost(cdev);
3343 	struct megasas_instance *instance =
3344 		(struct megasas_instance *) shost->hostdata;
3345 
3346 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3347 }
3348 
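/*
 * Note: page_size reports PAGE_SIZE - 1 because that is the most a single
 * fw_crash_buffer read will return (see the clamp in fw_crash_buffer_show()),
 * letting the dump application size its read buffer and step its offset.
 */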
3349 static ssize_t
3350 page_size_show(struct device *cdev,
3351 	struct device_attribute *attr, char *buf)
3352 {
3353 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3354 }
3355 
3356 static ssize_t
3357 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3358 	char *buf)
3359 {
3360 	struct Scsi_Host *shost = class_to_shost(cdev);
3361 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3362 
3363 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3364 }
3365 
3366 static ssize_t
3367 fw_cmds_outstanding_show(struct device *cdev,
3368 				 struct device_attribute *attr, char *buf)
3369 {
3370 	struct Scsi_Host *shost = class_to_shost(cdev);
3371 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3372 
3373 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3374 }
3375 
3376 static ssize_t
3377 enable_sdev_max_qd_show(struct device *cdev,
3378 	struct device_attribute *attr, char *buf)
3379 {
3380 	struct Scsi_Host *shost = class_to_shost(cdev);
3381 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3382 
3383 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3384 }
3385 
3386 static ssize_t
3387 enable_sdev_max_qd_store(struct device *cdev,
3388 	struct device_attribute *attr, const char *buf, size_t count)
3389 {
3390 	struct Scsi_Host *shost = class_to_shost(cdev);
3391 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3392 	u32 val = 0;
3393 	bool is_target_prop;
3394 	int ret_target_prop = DCMD_FAILED;
3395 	struct scsi_device *sdev;
3396 
3397 	if (kstrtou32(buf, 0, &val) != 0) {
3398 		pr_err("megasas: could not set enable_sdev_max_qd\n");
3399 		return -EINVAL;
3400 	}
3401 
3402 	mutex_lock(&instance->reset_mutex);
3403 	if (val)
3404 		instance->enable_sdev_max_qd = true;
3405 	else
3406 		instance->enable_sdev_max_qd = false;
3407 
3408 	shost_for_each_device(sdev, shost) {
3409 		ret_target_prop = megasas_get_target_prop(instance, sdev);
3410 		is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3411 		megasas_set_fw_assisted_qd(sdev, is_target_prop);
3412 	}
3413 	mutex_unlock(&instance->reset_mutex);
3414 
3415 	return strlen(buf);
3416 }
3417 
3418 static ssize_t
3419 dump_system_regs_show(struct device *cdev,
3420 			       struct device_attribute *attr, char *buf)
3421 {
3422 	struct Scsi_Host *shost = class_to_shost(cdev);
3423 	struct megasas_instance *instance =
3424 			(struct megasas_instance *)shost->hostdata;
3425 
3426 	return megasas_dump_sys_regs(instance->reg_set, buf);
3427 }
3428 
3429 static ssize_t
3430 raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3431 			  char *buf)
3432 {
3433 	struct Scsi_Host *shost = class_to_shost(cdev);
3434 	struct megasas_instance *instance =
3435 			(struct megasas_instance *)shost->hostdata;
3436 
3437 	return snprintf(buf, PAGE_SIZE, "%ld\n",
3438 			(unsigned long)instance->map_id);
3439 }
3440 
3441 static DEVICE_ATTR_RW(fw_crash_buffer);
3442 static DEVICE_ATTR_RO(fw_crash_buffer_size);
3443 static DEVICE_ATTR_RW(fw_crash_state);
3444 static DEVICE_ATTR_RO(page_size);
3445 static DEVICE_ATTR_RO(ldio_outstanding);
3446 static DEVICE_ATTR_RO(fw_cmds_outstanding);
3447 static DEVICE_ATTR_RW(enable_sdev_max_qd);
3448 static DEVICE_ATTR_RO(dump_system_regs);
3449 static DEVICE_ATTR_RO(raid_map_id);
3450 
3451 static struct device_attribute *megaraid_host_attrs[] = {
3452 	&dev_attr_fw_crash_buffer_size,
3453 	&dev_attr_fw_crash_buffer,
3454 	&dev_attr_fw_crash_state,
3455 	&dev_attr_page_size,
3456 	&dev_attr_ldio_outstanding,
3457 	&dev_attr_fw_cmds_outstanding,
3458 	&dev_attr_enable_sdev_max_qd,
3459 	&dev_attr_dump_system_regs,
3460 	&dev_attr_raid_map_id,
3461 	NULL,
3462 };
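/*
 * Illustration (not part of the driver): a user-space sketch of draining the
 * crash dump through the host attributes above, which appear under
 * /sys/class/scsi_host/host<N>/.  The host number, the output path, and the
 * numeric value written to fw_crash_state (it must match the driver's COPIED
 * state) are assumptions for the example only.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/class/scsi_host/host0/fw_crash_buffer",
 *			      O_RDWR);
 *		int out = open("fw_crash.dmp", O_WRONLY | O_CREAT, 0600);
 *		char num[32], buf[4096];
 *		long off = 0;
 *		ssize_t n;
 *
 *		if (fd < 0 || out < 0)
 *			return 1;
 *		for (;;) {
 *			lseek(fd, 0, SEEK_SET);
 *			// Select the byte offset (fw_crash_buffer_store above).
 *			snprintf(num, sizeof(num), "%ld", off);
 *			write(fd, num, strlen(num));
 *			lseek(fd, 0, SEEK_SET);
 *			// Read the data at that offset (fw_crash_buffer_show).
 *			n = read(fd, buf, sizeof(buf));
 *			if (n <= 0)
 *				break;
 *			write(out, buf, n);
 *			off += n;
 *		}
 *		// Tell the driver the copy finished so it frees the buffer.
 *		int st = open("/sys/class/scsi_host/host0/fw_crash_state",
 *			      O_WRONLY);
 *		if (st >= 0)
 *			write(st, "3", 1);	// "3" assumed to be COPIED
 *		return 0;
 *	}
 */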
3463 
3464 /*
3465  * Scsi host template for megaraid_sas driver
3466  */
3467 static struct scsi_host_template megasas_template = {
3468 
3469 	.module = THIS_MODULE,
3470 	.name = "Avago SAS based MegaRAID driver",
3471 	.proc_name = "megaraid_sas",
3472 	.slave_configure = megasas_slave_configure,
3473 	.slave_alloc = megasas_slave_alloc,
3474 	.slave_destroy = megasas_slave_destroy,
3475 	.queuecommand = megasas_queue_command,
3476 	.eh_target_reset_handler = megasas_reset_target,
3477 	.eh_abort_handler = megasas_task_abort,
3478 	.eh_host_reset_handler = megasas_reset_bus_host,
3479 	.eh_timed_out = megasas_reset_timer,
3480 	.shost_attrs = megaraid_host_attrs,
3481 	.bios_param = megasas_bios_param,
3482 	.map_queues = megasas_map_queues,
3483 	.change_queue_depth = scsi_change_queue_depth,
3484 	.max_segment_size = 0xffffffff,
3485 };
3486 
3487 /**
3488  * megasas_complete_int_cmd -	Completes an internal command
3489  * @instance:			Adapter soft state
3490  * @cmd:			Command to be completed
3491  *
3492  * The megasas_issue_blocked_cmd() function waits for a command to complete
3493  * after it issues a command. This function wakes up that waiting routine by
3494  * calling wake_up() on the wait queue.
3495  */
3496 static void
3497 megasas_complete_int_cmd(struct megasas_instance *instance,
3498 			 struct megasas_cmd *cmd)
3499 {
3500 	if (cmd->cmd_status_drv == DCMD_INIT)
3501 		cmd->cmd_status_drv =
3502 		(cmd->frame->io.cmd_status == MFI_STAT_OK) ?
3503 		DCMD_SUCCESS : DCMD_FAILED;
3504 
3505 	wake_up(&instance->int_cmd_wait_q);
3506 }
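/*
 * Illustration (not part of the driver): the sleeping side that the wake_up()
 * above pairs with.  megasas_issue_blocked_cmd() is defined elsewhere; this
 * sketch only models the handshake on int_cmd_wait_q and cmd_status_drv, and
 * the timeout expression is an assumption.
 *
 *	cmd->cmd_status_drv = DCMD_INIT;
 *	instance->instancet->issue_dcmd(instance, cmd);
 *
 *	// Sleep until megasas_complete_int_cmd() records a final status
 *	// (DCMD_SUCCESS or DCMD_FAILED) or the timeout expires.
 *	wait_event_timeout(instance->int_cmd_wait_q,
 *			   cmd->cmd_status_drv != DCMD_INIT,
 *			   timeout_secs * HZ);
 */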
3507 
3508 /**
3509  * megasas_complete_abort -	Completes aborting a command
3510  * @instance:			Adapter soft state
3511  * @cmd:			Cmd that was issued to abort another cmd
3512  *
3513  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3514  * after it issues an abort on a previously issued command. This function
3515  * wakes up all functions waiting on the same wait queue.
3516  */
3517 static void
3518 megasas_complete_abort(struct megasas_instance *instance,
3519 		       struct megasas_cmd *cmd)
3520 {
3521 	if (cmd->sync_cmd) {
3522 		cmd->sync_cmd = 0;
3523 		cmd->cmd_status_drv = DCMD_SUCCESS;
3524 		wake_up(&instance->abort_cmd_wait_q);
3525 	}
3526 }
3527 
3528 static void
3529 megasas_set_ld_removed_by_fw(struct megasas_instance *instance)
3530 {
3531 	uint i;
3532 
3533 	for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) {
3534 		if (instance->ld_ids_prev[i] != 0xff &&
3535 		    instance->ld_ids_from_raidmap[i] == 0xff) {
3536 			if (megasas_dbg_lvl & LD_PD_DEBUG)
3537 				dev_info(&instance->pdev->dev,
3538 					 "LD target ID %d removed from RAID map\n", i);
3539 			instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED;
3540 		}
3541 	}
3542 }
3543 
3544 /**
3545  * megasas_complete_cmd -	Completes a command
3546  * @instance:			Adapter soft state
3547  * @cmd:			Command to be completed
3548  * @alt_status:			If non-zero, use this value as status to
3549  *				SCSI mid-layer instead of the value returned
3550  *				by the FW. This should be used if caller wants
3551  *				an alternate status (as in the case of aborted
3552  *				commands)
3553  */
3554 void
3555 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3556 		     u8 alt_status)
3557 {
3558 	int exception = 0;
3559 	struct megasas_header *hdr = &cmd->frame->hdr;
3560 	unsigned long flags;
3561 	struct fusion_context *fusion = instance->ctrl_context;
3562 	u32 opcode, status;
3563 
3564 	/* flag for the retry reset */
3565 	cmd->retry_for_fw_reset = 0;
3566 
3567 	if (cmd->scmd)
3568 		cmd->scmd->SCp.ptr = NULL;
3569 
3570 	switch (hdr->cmd) {
3571 	case MFI_CMD_INVALID:
3572 		/* Some older 1068 controller FW may keep a pending
3573 		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3574 		   when booting the kdump kernel.  Ignore this command to
3575 		   prevent a kernel panic on shutdown of the kdump kernel. */
3576 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3577 		       "completed\n");
3578 		dev_warn(&instance->pdev->dev, "If you have a controller "
3579 		       "other than PERC5, please upgrade your firmware\n");
3580 		break;
3581 	case MFI_CMD_PD_SCSI_IO:
3582 	case MFI_CMD_LD_SCSI_IO:
3583 
3584 		/*
3585 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3586 		 * issued either through an IO path or an IOCTL path. If it
3587 		 * was via IOCTL, we will send it to internal completion.
3588 		 */
3589 		if (cmd->sync_cmd) {
3590 			cmd->sync_cmd = 0;
3591 			megasas_complete_int_cmd(instance, cmd);
3592 			break;
3593 		}
3594 		fallthrough;
3595 
3596 	case MFI_CMD_LD_READ:
3597 	case MFI_CMD_LD_WRITE:
3598 
3599 		if (alt_status) {
3600 			cmd->scmd->result = alt_status << 16;
3601 			exception = 1;
3602 		}
3603 
3604 		if (exception) {
3605 
3606 			atomic_dec(&instance->fw_outstanding);
3607 
3608 			scsi_dma_unmap(cmd->scmd);
3609 			cmd->scmd->scsi_done(cmd->scmd);
3610 			megasas_return_cmd(instance, cmd);
3611 
3612 			break;
3613 		}
3614 
3615 		switch (hdr->cmd_status) {
3616 
3617 		case MFI_STAT_OK:
3618 			cmd->scmd->result = DID_OK << 16;
3619 			break;
3620 
3621 		case MFI_STAT_SCSI_IO_FAILED:
3622 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3623 			cmd->scmd->result =
3624 			    (DID_ERROR << 16) | hdr->scsi_status;
3625 			break;
3626 
3627 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3628 
3629 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3630 
3631 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3632 				memset(cmd->scmd->sense_buffer, 0,
3633 				       SCSI_SENSE_BUFFERSIZE);
3634 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3635 				       hdr->sense_len);
3636 
3637 				cmd->scmd->result |= DRIVER_SENSE << 24;
3638 			}
3639 
3640 			break;
3641 
3642 		case MFI_STAT_LD_OFFLINE:
3643 		case MFI_STAT_DEVICE_NOT_FOUND:
3644 			cmd->scmd->result = DID_BAD_TARGET << 16;
3645 			break;
3646 
3647 		default:
3648 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3649 			       hdr->cmd_status);
3650 			cmd->scmd->result = DID_ERROR << 16;
3651 			break;
3652 		}
3653 
3654 		atomic_dec(&instance->fw_outstanding);
3655 
3656 		scsi_dma_unmap(cmd->scmd);
3657 		cmd->scmd->scsi_done(cmd->scmd);
3658 		megasas_return_cmd(instance, cmd);
3659 
3660 		break;
3661 
3662 	case MFI_CMD_SMP:
3663 	case MFI_CMD_STP:
3664 	case MFI_CMD_NVME:
3665 	case MFI_CMD_TOOLBOX:
3666 		megasas_complete_int_cmd(instance, cmd);
3667 		break;
3668 
3669 	case MFI_CMD_DCMD:
3670 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3671 		/* Check for LD map update */
3672 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3673 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3674 			fusion->fast_path_io = 0;
3675 			spin_lock_irqsave(instance->host->host_lock, flags);
3676 			status = cmd->frame->hdr.cmd_status;
3677 			instance->map_update_cmd = NULL;
3678 			if (status != MFI_STAT_OK) {
3679 				if (status != MFI_STAT_NOT_FOUND)
3680 					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3681 					       cmd->frame->hdr.cmd_status);
3682 				else {
3683 					megasas_return_cmd(instance, cmd);
3684 					spin_unlock_irqrestore(
3685 						instance->host->host_lock,
3686 						flags);
3687 					break;
3688 				}
3689 			}
3690 
3691 			megasas_return_cmd(instance, cmd);
3692 
3693 			/*
3694 			 * Set fast path IO to ZERO.
3695 			 * Validate Map will set proper value.
3696 			 * Meanwhile all IOs will go as LD IO.
3697 			 */
3698 			if (status == MFI_STAT_OK &&
3699 			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3700 				instance->map_id++;
3701 				fusion->fast_path_io = 1;
3702 			} else {
3703 				fusion->fast_path_io = 0;
3704 			}
3705 
3706 			if (instance->adapter_type >= INVADER_SERIES)
3707 				megasas_set_ld_removed_by_fw(instance);
3708 
3709 			megasas_sync_map_info(instance);
3710 			spin_unlock_irqrestore(instance->host->host_lock,
3711 					       flags);
3712 
3713 			break;
3714 		}
3715 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3716 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3717 			spin_lock_irqsave(&poll_aen_lock, flags);
3718 			megasas_poll_wait_aen = 0;
3719 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3720 		}
3721 
3722 		/* FW has an updated PD sequence */
3723 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3724 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3725 
3726 			spin_lock_irqsave(instance->host->host_lock, flags);
3727 			status = cmd->frame->hdr.cmd_status;
3728 			instance->jbod_seq_cmd = NULL;
3729 			megasas_return_cmd(instance, cmd);
3730 
3731 			if (status == MFI_STAT_OK) {
3732 				instance->pd_seq_map_id++;
3733 				/* Re-register a pd sync seq num cmd */
3734 				if (megasas_sync_pd_seq_num(instance, true))
3735 					instance->use_seqnum_jbod_fp = false;
3736 			} else
3737 				instance->use_seqnum_jbod_fp = false;
3738 
3739 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3740 			break;
3741 		}
3742 
3743 		/*
3744 		 * See if we got an event notification
3745 		 */
3746 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3747 			megasas_service_aen(instance, cmd);
3748 		else
3749 			megasas_complete_int_cmd(instance, cmd);
3750 
3751 		break;
3752 
3753 	case MFI_CMD_ABORT:
3754 		/*
3755 		 * Cmd issued to abort another cmd returned
3756 		 */
3757 		megasas_complete_abort(instance, cmd);
3758 		break;
3759 
3760 	default:
3761 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3762 		       hdr->cmd);
3763 		megasas_complete_int_cmd(instance, cmd);
3764 		break;
3765 	}
3766 }
3767 
3768 /**
3769  * megasas_issue_pending_cmds_again -	issue all pending cmds
3770  *					in FW again because of the fw reset
3771  * @instance:				Adapter soft state
3772  */
3773 static inline void
3774 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3775 {
3776 	struct megasas_cmd *cmd;
3777 	struct list_head clist_local;
3778 	union megasas_evt_class_locale class_locale;
3779 	unsigned long flags;
3780 	u32 seq_num;
3781 
3782 	INIT_LIST_HEAD(&clist_local);
3783 	spin_lock_irqsave(&instance->hba_lock, flags);
3784 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3785 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3786 
3787 	while (!list_empty(&clist_local)) {
3788 		cmd = list_entry((&clist_local)->next,
3789 					struct megasas_cmd, list);
3790 		list_del_init(&cmd->list);
3791 
3792 		if (cmd->sync_cmd || cmd->scmd) {
3793 			dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3794 				"detected to be pending during HBA reset\n",
3795 					cmd, cmd->scmd, cmd->sync_cmd);
3796 
3797 			cmd->retry_for_fw_reset++;
3798 
3799 			if (cmd->retry_for_fw_reset == 3) {
3800 				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
3801 					"was tried multiple times during reset. "
3802 					"Shutting down the HBA\n",
3803 					cmd, cmd->scmd, cmd->sync_cmd);
3804 				instance->instancet->disable_intr(instance);
3805 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3806 				megaraid_sas_kill_hba(instance);
3807 				return;
3808 			}
3809 		}
3810 
3811 		if (cmd->sync_cmd == 1) {
3812 			if (cmd->scmd) {
3813 				dev_notice(&instance->pdev->dev, "unexpected "
3814 					"cmd attached to internal command!\n");
3815 			}
3816 			dev_notice(&instance->pdev->dev, "%p synchronous cmd "
3817 						"on the internal reset queue, "
3818 						"issue it again.\n", cmd);
3819 			cmd->cmd_status_drv = DCMD_INIT;
3820 			instance->instancet->fire_cmd(instance,
3821 							cmd->frame_phys_addr,
3822 							0, instance->reg_set);
3823 		} else if (cmd->scmd) {
3824 			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3825 			"detected on the internal queue, issue again.\n",
3826 			cmd, cmd->scmd->cmnd[0]);
3827 
3828 			atomic_inc(&instance->fw_outstanding);
3829 			instance->instancet->fire_cmd(instance,
3830 					cmd->frame_phys_addr,
3831 					cmd->frame_count-1, instance->reg_set);
3832 		} else {
3833 			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
3834 				"internal reset defer list during re-issue\n",
3835 				cmd);
3836 		}
3837 	}
3838 
3839 	if (instance->aen_cmd) {
3840 		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3841 		megasas_return_cmd(instance, instance->aen_cmd);
3842 
3843 		instance->aen_cmd = NULL;
3844 	}
3845 
3846 	/*
3847 	 * Initiate AEN (Asynchronous Event Notification)
3848 	 */
3849 	seq_num = instance->last_seq_num;
3850 	class_locale.members.reserved = 0;
3851 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3852 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3853 
3854 	megasas_register_aen(instance, seq_num, class_locale.word);
3855 }
3856 
3857 /*
3858  * Move the internal reset pending commands to a deferred queue.
3859  *
3860  * We move the commands pending at internal reset time to a
3861  * pending queue. This queue would be flushed after successful
3862  * completion of the internal reset sequence. If the internal reset
3863  * did not complete in time, the kernel reset handler would flush
3864  * these commands.
3865  */
3866 static void
3867 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3868 {
3869 	struct megasas_cmd *cmd;
3870 	int i;
3871 	u16 max_cmd = instance->max_fw_cmds;
3872 	u32 defer_index;
3873 	unsigned long flags;
3874 
3875 	defer_index = 0;
3876 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3877 	for (i = 0; i < max_cmd; i++) {
3878 		cmd = instance->cmd_list[i];
3879 		if (cmd->sync_cmd == 1 || cmd->scmd) {
3880 			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
3881 					"on the defer queue as internal\n",
3882 				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3883 
3884 			if (!list_empty(&cmd->list)) {
3885 				dev_notice(&instance->pdev->dev, "ERROR while"
3886 					" moving this cmd:%p, %d %p, it was"
3887 					"discovered on some list?\n",
3888 					cmd, cmd->sync_cmd, cmd->scmd);
3889 
3890 				list_del_init(&cmd->list);
3891 			}
3892 			defer_index++;
3893 			list_add_tail(&cmd->list,
3894 				&instance->internal_reset_pending_q);
3895 		}
3896 	}
3897 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3898 }
3899 
3900 
3901 static void
3902 process_fw_state_change_wq(struct work_struct *work)
3903 {
3904 	struct megasas_instance *instance =
3905 		container_of(work, struct megasas_instance, work_init);
3906 	u32 wait;
3907 	unsigned long flags;
3908 
3909 	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3910 		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3911 				atomic_read(&instance->adprecovery));
3912 		return;
3913 	}
3914 
3915 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3916 		dev_notice(&instance->pdev->dev, "FW detected to be in fault "
3917 					"state, restarting it...\n");
3918 
3919 		instance->instancet->disable_intr(instance);
3920 		atomic_set(&instance->fw_outstanding, 0);
3921 
3922 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3923 		instance->instancet->adp_reset(instance, instance->reg_set);
3924 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3925 
3926 		dev_notice(&instance->pdev->dev, "FW restarted successfully, "
3927 					"initiating next stage...\n");
3928 
3929 		dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
3930 					"state 2 starting...\n");
3931 
3932 		/* wait for about 30 seconds before starting the second init */
3933 		for (wait = 0; wait < 30; wait++) {
3934 			msleep(1000);
3935 		}
3936 
3937 		if (megasas_transition_to_ready(instance, 1)) {
3938 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3939 
3940 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3941 			megaraid_sas_kill_hba(instance);
3942 			return;
3943 		}
3944 
3945 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3946 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3947 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3948 			) {
3949 			*instance->consumer = *instance->producer;
3950 		} else {
3951 			*instance->consumer = 0;
3952 			*instance->producer = 0;
3953 		}
3954 
3955 		megasas_issue_init_mfi(instance);
3956 
3957 		spin_lock_irqsave(&instance->hba_lock, flags);
3958 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3959 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3960 		instance->instancet->enable_intr(instance);
3961 
3962 		megasas_issue_pending_cmds_again(instance);
3963 		instance->issuepend_done = 1;
3964 	}
3965 }
3966 
3967 /**
3968  * megasas_deplete_reply_queue -	Processes all completed commands
3969  * @instance:				Adapter soft state
3970  * @alt_status:				Alternate status to be returned to
3971  *					SCSI mid-layer instead of the status
3972  *					returned by the FW
3973  * Note: this must be called with hba lock held
3974  */
3975 static int
3976 megasas_deplete_reply_queue(struct megasas_instance *instance,
3977 					u8 alt_status)
3978 {
3979 	u32 mfiStatus;
3980 	u32 fw_state;
3981 
3982 	if ((mfiStatus = instance->instancet->check_reset(instance,
3983 					instance->reg_set)) == 1) {
3984 		return IRQ_HANDLED;
3985 	}
3986 
3987 	mfiStatus = instance->instancet->clear_intr(instance);
3988 	if (mfiStatus == 0) {
3989 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3990 		if (!instance->msix_vectors)
3991 			return IRQ_NONE;
3992 	}
3993 
3994 	instance->mfiStatus = mfiStatus;
3995 
3996 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3997 		fw_state = instance->instancet->read_fw_status_reg(
3998 				instance) & MFI_STATE_MASK;
3999 
4000 		if (fw_state != MFI_STATE_FAULT) {
4001 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
4002 						fw_state);
4003 		}
4004 
4005 		if ((fw_state == MFI_STATE_FAULT) &&
4006 				(instance->disableOnlineCtrlReset == 0)) {
4007 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
4008 
4009 			if ((instance->pdev->device ==
4010 					PCI_DEVICE_ID_LSI_SAS1064R) ||
4011 				(instance->pdev->device ==
4012 					PCI_DEVICE_ID_DELL_PERC5) ||
4013 				(instance->pdev->device ==
4014 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
4015 
4016 				*instance->consumer =
4017 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
4018 			}
4019 
4020 
4021 			instance->instancet->disable_intr(instance);
4022 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4023 			instance->issuepend_done = 0;
4024 
4025 			atomic_set(&instance->fw_outstanding, 0);
4026 			megasas_internal_reset_defer_cmds(instance);
4027 
4028 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
4029 					fw_state, atomic_read(&instance->adprecovery));
4030 
4031 			schedule_work(&instance->work_init);
4032 			return IRQ_HANDLED;
4033 
4034 		} else {
4035 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
4036 				fw_state, instance->disableOnlineCtrlReset);
4037 		}
4038 	}
4039 
4040 	tasklet_schedule(&instance->isr_tasklet);
4041 	return IRQ_HANDLED;
4042 }
4043 
4044 /**
4045  * megasas_isr - isr entry point
4046  * @irq:	IRQ number
4047  * @devp:	IRQ context address
4048  */
4049 static irqreturn_t megasas_isr(int irq, void *devp)
4050 {
4051 	struct megasas_irq_context *irq_context = devp;
4052 	struct megasas_instance *instance = irq_context->instance;
4053 	unsigned long flags;
4054 	irqreturn_t rc;
4055 
4056 	if (atomic_read(&instance->fw_reset_no_pci_access))
4057 		return IRQ_HANDLED;
4058 
4059 	spin_lock_irqsave(&instance->hba_lock, flags);
4060 	rc = megasas_deplete_reply_queue(instance, DID_OK);
4061 	spin_unlock_irqrestore(&instance->hba_lock, flags);
4062 
4063 	return rc;
4064 }
4065 
4066 /**
4067  * megasas_transition_to_ready -	Move the FW to READY state
4068  * @instance:				Adapter soft state
4069  * @ocr:				Adapter reset state
4070  *
4071  * During initialization, the FW can be in any one of several possible
4072  * states. If the FW is in the operational or waiting-for-handshake states,
4073  * the driver must take steps to bring it to the ready state. Otherwise, it
4074  * has to wait for the FW to reach the ready state.
4075  */
4076 int
4077 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
4078 {
4079 	int i;
4080 	u8 max_wait;
4081 	u32 fw_state;
4082 	u32 abs_state, curr_abs_state;
4083 
4084 	abs_state = instance->instancet->read_fw_status_reg(instance);
4085 	fw_state = abs_state & MFI_STATE_MASK;
4086 
4087 	if (fw_state != MFI_STATE_READY)
4088 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
4089 		       " state\n");
4090 
4091 	while (fw_state != MFI_STATE_READY) {
4092 
4093 		switch (fw_state) {
4094 
4095 		case MFI_STATE_FAULT:
4096 			dev_printk(KERN_ERR, &instance->pdev->dev,
4097 				   "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
4098 				   abs_state & MFI_STATE_FAULT_CODE,
4099 				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4100 			if (ocr) {
4101 				max_wait = MEGASAS_RESET_WAIT_TIME;
4102 				break;
4103 			} else {
4104 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4105 				megasas_dump_reg_set(instance->reg_set);
4106 				return -ENODEV;
4107 			}
4108 
4109 		case MFI_STATE_WAIT_HANDSHAKE:
4110 			/*
4111 			 * Set the CLR bit in inbound doorbell
4112 			 */
4113 			if ((instance->pdev->device ==
4114 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4115 				(instance->pdev->device ==
4116 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4117 				(instance->adapter_type != MFI_SERIES))
4118 				writel(
4119 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4120 				  &instance->reg_set->doorbell);
4121 			else
4122 				writel(
4123 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4124 					&instance->reg_set->inbound_doorbell);
4125 
4126 			max_wait = MEGASAS_RESET_WAIT_TIME;
4127 			break;
4128 
4129 		case MFI_STATE_BOOT_MESSAGE_PENDING:
4130 			if ((instance->pdev->device ==
4131 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4132 				(instance->pdev->device ==
4133 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4134 				(instance->adapter_type != MFI_SERIES))
4135 				writel(MFI_INIT_HOTPLUG,
4136 				       &instance->reg_set->doorbell);
4137 			else
4138 				writel(MFI_INIT_HOTPLUG,
4139 					&instance->reg_set->inbound_doorbell);
4140 
4141 			max_wait = MEGASAS_RESET_WAIT_TIME;
4142 			break;
4143 
4144 		case MFI_STATE_OPERATIONAL:
4145 			/*
4146 			 * Bring it to READY state; assuming max wait 10 secs
4147 			 */
4148 			instance->instancet->disable_intr(instance);
4149 			if ((instance->pdev->device ==
4150 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4151 				(instance->pdev->device ==
4152 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
4153 				(instance->adapter_type != MFI_SERIES)) {
4154 				writel(MFI_RESET_FLAGS,
4155 					&instance->reg_set->doorbell);
4156 
4157 				if (instance->adapter_type != MFI_SERIES) {
4158 					for (i = 0; i < (10 * 1000); i += 20) {
4159 						if (megasas_readl(
4160 							    instance,
4161 							    &instance->
4162 							    reg_set->
4163 							    doorbell) & 1)
4164 							msleep(20);
4165 						else
4166 							break;
4167 					}
4168 				}
4169 			} else
4170 				writel(MFI_RESET_FLAGS,
4171 					&instance->reg_set->inbound_doorbell);
4172 
4173 			max_wait = MEGASAS_RESET_WAIT_TIME;
4174 			break;
4175 
4176 		case MFI_STATE_UNDEFINED:
4177 			/*
4178 			 * This state should not last for more than 2 seconds
4179 			 */
4180 			max_wait = MEGASAS_RESET_WAIT_TIME;
4181 			break;
4182 
4183 		case MFI_STATE_BB_INIT:
4184 			max_wait = MEGASAS_RESET_WAIT_TIME;
4185 			break;
4186 
4187 		case MFI_STATE_FW_INIT:
4188 			max_wait = MEGASAS_RESET_WAIT_TIME;
4189 			break;
4190 
4191 		case MFI_STATE_FW_INIT_2:
4192 			max_wait = MEGASAS_RESET_WAIT_TIME;
4193 			break;
4194 
4195 		case MFI_STATE_DEVICE_SCAN:
4196 			max_wait = MEGASAS_RESET_WAIT_TIME;
4197 			break;
4198 
4199 		case MFI_STATE_FLUSH_CACHE:
4200 			max_wait = MEGASAS_RESET_WAIT_TIME;
4201 			break;
4202 
4203 		default:
4204 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4205 			       fw_state);
4206 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4207 			megasas_dump_reg_set(instance->reg_set);
4208 			return -ENODEV;
4209 		}
4210 
4211 		/*
4212 		 * The cur_state should not last for more than max_wait secs
4213 		 */
4214 		for (i = 0; i < max_wait * 50; i++) {
4215 			curr_abs_state = instance->instancet->
4216 				read_fw_status_reg(instance);
4217 
4218 			if (abs_state == curr_abs_state) {
4219 				msleep(20);
4220 			} else
4221 				break;
4222 		}
4223 
4224 		/*
4225 		 * Return error if fw_state hasn't changed after max_wait
4226 		 */
4227 		if (curr_abs_state == abs_state) {
4228 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4229 			       "in %d secs\n", fw_state, max_wait);
4230 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4231 			megasas_dump_reg_set(instance->reg_set);
4232 			return -ENODEV;
4233 		}
4234 
4235 		abs_state = curr_abs_state;
4236 		fw_state = curr_abs_state & MFI_STATE_MASK;
4237 	}
4238 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4239 
4240 	return 0;
4241 }
4242 
4243 /**
4244  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
4245  * @instance:				Adapter soft state
4246  */
4247 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4248 {
4249 	int i;
4250 	u16 max_cmd = instance->max_mfi_cmds;
4251 	struct megasas_cmd *cmd;
4252 
4253 	if (!instance->frame_dma_pool)
4254 		return;
4255 
4256 	/*
4257 	 * Return all frames to pool
4258 	 */
4259 	for (i = 0; i < max_cmd; i++) {
4260 
4261 		cmd = instance->cmd_list[i];
4262 
4263 		if (cmd->frame)
4264 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
4265 				      cmd->frame_phys_addr);
4266 
4267 		if (cmd->sense)
4268 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
4269 				      cmd->sense_phys_addr);
4270 	}
4271 
4272 	/*
4273 	 * Now destroy the pool itself
4274 	 */
4275 	dma_pool_destroy(instance->frame_dma_pool);
4276 	dma_pool_destroy(instance->sense_dma_pool);
4277 
4278 	instance->frame_dma_pool = NULL;
4279 	instance->sense_dma_pool = NULL;
4280 }
4281 
4282 /**
4283  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
4284  * @instance:			Adapter soft state
4285  *
4286  * Each command packet has an embedded DMA memory buffer that is used for
4287  * filling MFI frame and the SG list that immediately follows the frame. This
4288  * function creates those DMA memory buffers for each command packet by using
4289  * PCI pool facility.
4290  */
4291 static int megasas_create_frame_pool(struct megasas_instance *instance)
4292 {
4293 	int i;
4294 	u16 max_cmd;
4295 	u32 frame_count;
4296 	struct megasas_cmd *cmd;
4297 
4298 	max_cmd = instance->max_mfi_cmds;
4299 
4300 	/*
4301 	 * For MFI controllers.
4302 	 * max_num_sge = 60
4303 	 * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
4304 	 * Total 960 bytes (15 MFI frames of 64 bytes)
4305 	 *
4306 	 * Fusion adapters require only 3 extra frames.
4307 	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4308 	 * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
4309 	 * Total 192 bytes (3 MFI frames of 64 bytes)
4310 	 */
4311 	frame_count = (instance->adapter_type == MFI_SERIES) ?
4312 			(15 + 1) : (3 + 1);
4313 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
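	/*
	 * Worked example: with the 64-byte MFI frames described above, an
	 * MFI-series command gets a 16 * 64 = 1024-byte pool object, while a
	 * Fusion-series command needs only 4 * 64 = 256 bytes.
	 */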
4314 	/*
4315 	 * Use DMA pool facility provided by PCI layer
4316 	 */
4317 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4318 					&instance->pdev->dev,
4319 					instance->mfi_frame_size, 256, 0);
4320 
4321 	if (!instance->frame_dma_pool) {
4322 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4323 		return -ENOMEM;
4324 	}
4325 
4326 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4327 						   &instance->pdev->dev, 128,
4328 						   4, 0);
4329 
4330 	if (!instance->sense_dma_pool) {
4331 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4332 
4333 		dma_pool_destroy(instance->frame_dma_pool);
4334 		instance->frame_dma_pool = NULL;
4335 
4336 		return -ENOMEM;
4337 	}
4338 
4339 	/*
4340 	 * Allocate and attach a frame to each of the commands in cmd_list.
4341 	 * By making cmd->index as the context instead of the &cmd, we can
4342 	 * always use 32bit context regardless of the architecture
4343 	 */
4344 	for (i = 0; i < max_cmd; i++) {
4345 
4346 		cmd = instance->cmd_list[i];
4347 
4348 		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4349 					    GFP_KERNEL, &cmd->frame_phys_addr);
4350 
4351 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4352 					    GFP_KERNEL, &cmd->sense_phys_addr);
4353 
4354 		/*
4355 		 * megasas_teardown_frame_pool() takes care of freeing
4356 		 * whatever has been allocated
4357 		 */
4358 		if (!cmd->frame || !cmd->sense) {
4359 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4360 			megasas_teardown_frame_pool(instance);
4361 			return -ENOMEM;
4362 		}
4363 
4364 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4365 		cmd->frame->io.pad_0 = 0;
4366 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4367 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4368 	}
4369 
4370 	return 0;
4371 }
4372 
4373 /**
4374  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4375  * @instance:		Adapter soft state
4376  */
4377 void megasas_free_cmds(struct megasas_instance *instance)
4378 {
4379 	int i;
4380 
4381 	/* First free the MFI frame pool */
4382 	megasas_teardown_frame_pool(instance);
4383 
4384 	/* Free all the commands in the cmd_list */
4385 	for (i = 0; i < instance->max_mfi_cmds; i++)
4386 
4387 		kfree(instance->cmd_list[i]);
4388 
4389 	/* Free the cmd_list buffer itself */
4390 	kfree(instance->cmd_list);
4391 	instance->cmd_list = NULL;
4392 
4393 	INIT_LIST_HEAD(&instance->cmd_pool);
4394 }
4395 
4396 /**
4397  * megasas_alloc_cmds -	Allocates the command packets
4398  * @instance:		Adapter soft state
4399  *
4400  * Each command that is issued to the FW, whether IO commands from the OS or
4401  * internal commands like IOCTLs, is wrapped in a local data structure called
4402  * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4403  * the FW.
4404  *
4405  * Each frame has a 32-bit field called context (tag). This context is used
4406  * to get back the megasas_cmd from the frame when a frame gets completed in
4407  * the ISR. Typically the address of the megasas_cmd itself would be used as
4408  * the context. But we wanted to keep the differences between 32 and 64 bit
4409  * systems to the minimum. We always use 32 bit integers for the context. In
4410  * this driver, the 32 bit values are the indices into an array cmd_list.
4411  * This array is used only to look up the megasas_cmd given the context. The
4412  * free commands themselves are maintained in a linked list called cmd_pool.
4413  */
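/*
 * Illustration (not part of the driver): how a completion path can map the
 * 32-bit context carried in a frame back to its command.  The context field
 * is the one set up in megasas_create_frame_pool(); the bounds check is an
 * added precaution for the example, not lifted from this file.
 *
 *	u32 context = le32_to_cpu(frame->io.context);
 *	struct megasas_cmd *cmd = NULL;
 *
 *	if (context < instance->max_mfi_cmds)
 *		cmd = instance->cmd_list[context];
 */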
4414 int megasas_alloc_cmds(struct megasas_instance *instance)
4415 {
4416 	int i;
4417 	int j;
4418 	u16 max_cmd;
4419 	struct megasas_cmd *cmd;
4420 
4421 	max_cmd = instance->max_mfi_cmds;
4422 
4423 	/*
4424 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4425 	 * Allocate the dynamic array first and then allocate individual
4426 	 * commands.
4427 	 */
4428 	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
4429 
4430 	if (!instance->cmd_list) {
4431 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4432 		return -ENOMEM;
4433 	}
4434 
4435 	memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
4436 
4437 	for (i = 0; i < max_cmd; i++) {
4438 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4439 						GFP_KERNEL);
4440 
4441 		if (!instance->cmd_list[i]) {
4442 
4443 			for (j = 0; j < i; j++)
4444 				kfree(instance->cmd_list[j]);
4445 
4446 			kfree(instance->cmd_list);
4447 			instance->cmd_list = NULL;
4448 
4449 			return -ENOMEM;
4450 		}
4451 	}
4452 
4453 	for (i = 0; i < max_cmd; i++) {
4454 		cmd = instance->cmd_list[i];
4455 		memset(cmd, 0, sizeof(struct megasas_cmd));
4456 		cmd->index = i;
4457 		cmd->scmd = NULL;
4458 		cmd->instance = instance;
4459 
4460 		list_add_tail(&cmd->list, &instance->cmd_pool);
4461 	}
4462 
4463 	/*
4464 	 * Create a frame pool and assign one frame to each cmd
4465 	 */
4466 	if (megasas_create_frame_pool(instance)) {
4467 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4468 		megasas_free_cmds(instance);
4469 		return -ENOMEM;
4470 	}
4471 
4472 	return 0;
4473 }
4474 
4475 /*
4476  * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4477  * @instance:				Adapter soft state
4478  *
4479  * Returns INITIATE_OCR only for Fusion adapters when driver load/unload is not
4480  * in progress and the FW is not under OCR; else KILL_ADAPTER or IGNORE_TIMEOUT.
4481  */
4482 inline int
4483 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4484 
4485 	if (instance->adapter_type == MFI_SERIES)
4486 		return KILL_ADAPTER;
4487 	else if (instance->unload ||
4488 			test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
4489 				 &instance->reset_flags))
4490 		return IGNORE_TIMEOUT;
4491 	else
4492 		return INITIATE_OCR;
4493 }
4494 
4495 static void
4496 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4497 {
4498 	int ret;
4499 	struct megasas_cmd *cmd;
4500 	struct megasas_dcmd_frame *dcmd;
4501 
4502 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4503 	u16 device_id = 0;
4504 
4505 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4506 	cmd = megasas_get_cmd(instance);
4507 
4508 	if (!cmd) {
4509 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4510 		return;
4511 	}
4512 
4513 	dcmd = &cmd->frame->dcmd;
4514 
4515 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4516 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4517 
4518 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4519 	dcmd->cmd = MFI_CMD_DCMD;
4520 	dcmd->cmd_status = 0xFF;
4521 	dcmd->sge_count = 1;
4522 	dcmd->flags = MFI_FRAME_DIR_READ;
4523 	dcmd->timeout = 0;
4524 	dcmd->pad_0 = 0;
4525 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4526 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4527 
4528 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4529 				 sizeof(struct MR_PD_INFO));
4530 
4531 	if ((instance->adapter_type != MFI_SERIES) &&
4532 	    !instance->mask_interrupts)
4533 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4534 	else
4535 		ret = megasas_issue_polled(instance, cmd);
4536 
4537 	switch (ret) {
4538 	case DCMD_SUCCESS:
4539 		mr_device_priv_data = sdev->hostdata;
4540 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4541 		mr_device_priv_data->interface_type =
4542 				instance->pd_info->state.ddf.pdType.intf;
4543 		break;
4544 
4545 	case DCMD_TIMEOUT:
4546 
4547 		switch (dcmd_timeout_ocr_possible(instance)) {
4548 		case INITIATE_OCR:
4549 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4550 			mutex_unlock(&instance->reset_mutex);
4551 			megasas_reset_fusion(instance->host,
4552 				MFI_IO_TIMEOUT_OCR);
4553 			mutex_lock(&instance->reset_mutex);
4554 			break;
4555 		case KILL_ADAPTER:
4556 			megaraid_sas_kill_hba(instance);
4557 			break;
4558 		case IGNORE_TIMEOUT:
4559 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4560 				__func__, __LINE__);
4561 			break;
4562 		}
4563 
4564 		break;
4565 	}
4566 
4567 	if (ret != DCMD_TIMEOUT)
4568 		megasas_return_cmd(instance, cmd);
4569 
4570 	return;
4571 }
4572 /*
4573  * megasas_get_pd_list -	Returns FW's pd_list structure
4574  * @instance:				Adapter soft state
4575  * @pd_list:				pd_list structure
4576  *
4577  * Issues an internal command (DCMD) to get the FW's controller PD
4578  * list structure.  This information is mainly used to find out the system
4579  * PDs exposed by the FW.
4580  */
4581 static int
4582 megasas_get_pd_list(struct megasas_instance *instance)
4583 {
4584 	int ret = 0, pd_index = 0;
4585 	struct megasas_cmd *cmd;
4586 	struct megasas_dcmd_frame *dcmd;
4587 	struct MR_PD_LIST *ci;
4588 	struct MR_PD_ADDRESS *pd_addr;
4589 
4590 	if (instance->pd_list_not_supported) {
4591 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4592 		"not supported by firmware\n");
4593 		return ret;
4594 	}
4595 
4596 	ci = instance->pd_list_buf;
4597 
4598 	cmd = megasas_get_cmd(instance);
4599 
4600 	if (!cmd) {
4601 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4602 		return -ENOMEM;
4603 	}
4604 
4605 	dcmd = &cmd->frame->dcmd;
4606 
4607 	memset(ci, 0, sizeof(*ci));
4608 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4609 
4610 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4611 	dcmd->mbox.b[1] = 0;
4612 	dcmd->cmd = MFI_CMD_DCMD;
4613 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4614 	dcmd->sge_count = 1;
4615 	dcmd->flags = MFI_FRAME_DIR_READ;
4616 	dcmd->timeout = 0;
4617 	dcmd->pad_0 = 0;
4618 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4619 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4620 
4621 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4622 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4623 
4624 	if ((instance->adapter_type != MFI_SERIES) &&
4625 	    !instance->mask_interrupts)
4626 		ret = megasas_issue_blocked_cmd(instance, cmd,
4627 			MFI_IO_TIMEOUT_SECS);
4628 	else
4629 		ret = megasas_issue_polled(instance, cmd);
4630 
4631 	switch (ret) {
4632 	case DCMD_FAILED:
4633 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4634 			"failed/not supported by firmware\n");
4635 
4636 		if (instance->adapter_type != MFI_SERIES)
4637 			megaraid_sas_kill_hba(instance);
4638 		else
4639 			instance->pd_list_not_supported = 1;
4640 		break;
4641 	case DCMD_TIMEOUT:
4642 
4643 		switch (dcmd_timeout_ocr_possible(instance)) {
4644 		case INITIATE_OCR:
4645 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4646 			/*
4647 			 * DCMD failed from AEN path.
4648 			 * AEN path already hold reset_mutex to avoid PCI access
4649 			 * while OCR is in progress.
4650 			 */
4651 			mutex_unlock(&instance->reset_mutex);
4652 			megasas_reset_fusion(instance->host,
4653 						MFI_IO_TIMEOUT_OCR);
4654 			mutex_lock(&instance->reset_mutex);
4655 			break;
4656 		case KILL_ADAPTER:
4657 			megaraid_sas_kill_hba(instance);
4658 			break;
4659 		case IGNORE_TIMEOUT:
4660 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4661 				__func__, __LINE__);
4662 			break;
4663 		}
4664 
4665 		break;
4666 
4667 	case DCMD_SUCCESS:
4668 		pd_addr = ci->addr;
4669 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4670 			dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4671 				 __func__, le32_to_cpu(ci->count));
4672 
4673 		if ((le32_to_cpu(ci->count) >
4674 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4675 			break;
4676 
4677 		memset(instance->local_pd_list, 0,
4678 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4679 
4680 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4681 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4682 					le16_to_cpu(pd_addr->deviceId);
4683 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4684 					pd_addr->scsiDevType;
4685 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4686 					MR_PD_STATE_SYSTEM;
4687 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4688 				dev_info(&instance->pdev->dev,
4689 					 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4690 					 pd_index, le16_to_cpu(pd_addr->deviceId),
4691 					 pd_addr->scsiDevType);
4692 			pd_addr++;
4693 		}
4694 
4695 		memcpy(instance->pd_list, instance->local_pd_list,
4696 			sizeof(instance->pd_list));
4697 		break;
4698 
4699 	}
4700 
4701 	if (ret != DCMD_TIMEOUT)
4702 		megasas_return_cmd(instance, cmd);
4703 
4704 	return ret;
4705 }
4706 
4707 /*
4708  * megasas_get_ld_list -	Returns FW's ld_list structure
4709  * @instance:				Adapter soft state
4710  * @ld_list:				ld_list structure
4711  *
4712  * Issues an internal command (DCMD) to get the FW's controller LD
4713  * list structure.  This information is mainly used to find out the LDs
4714  * exposed by the FW.
4715  */
4716 static int
4717 megasas_get_ld_list(struct megasas_instance *instance)
4718 {
4719 	int ret = 0, ld_index = 0, ids = 0;
4720 	struct megasas_cmd *cmd;
4721 	struct megasas_dcmd_frame *dcmd;
4722 	struct MR_LD_LIST *ci;
4723 	dma_addr_t ci_h = 0;
4724 	u32 ld_count;
4725 
4726 	ci = instance->ld_list_buf;
4727 	ci_h = instance->ld_list_buf_h;
4728 
4729 	cmd = megasas_get_cmd(instance);
4730 
4731 	if (!cmd) {
4732 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4733 		return -ENOMEM;
4734 	}
4735 
4736 	dcmd = &cmd->frame->dcmd;
4737 
4738 	memset(ci, 0, sizeof(*ci));
4739 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4740 
4741 	if (instance->supportmax256vd)
4742 		dcmd->mbox.b[0] = 1;
4743 	dcmd->cmd = MFI_CMD_DCMD;
4744 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4745 	dcmd->sge_count = 1;
4746 	dcmd->flags = MFI_FRAME_DIR_READ;
4747 	dcmd->timeout = 0;
4748 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4749 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4750 	dcmd->pad_0  = 0;
4751 
4752 	megasas_set_dma_settings(instance, dcmd, ci_h,
4753 				 sizeof(struct MR_LD_LIST));
4754 
4755 	if ((instance->adapter_type != MFI_SERIES) &&
4756 	    !instance->mask_interrupts)
4757 		ret = megasas_issue_blocked_cmd(instance, cmd,
4758 			MFI_IO_TIMEOUT_SECS);
4759 	else
4760 		ret = megasas_issue_polled(instance, cmd);
4761 
4762 	ld_count = le32_to_cpu(ci->ldCount);
4763 
4764 	switch (ret) {
4765 	case DCMD_FAILED:
4766 		megaraid_sas_kill_hba(instance);
4767 		break;
4768 	case DCMD_TIMEOUT:
4769 
4770 		switch (dcmd_timeout_ocr_possible(instance)) {
4771 		case INITIATE_OCR:
4772 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4773 			/*
4774 			 * DCMD failed from AEN path.
4775 			 * AEN path already hold reset_mutex to avoid PCI access
4776 			 * while OCR is in progress.
4777 			 */
4778 			mutex_unlock(&instance->reset_mutex);
4779 			megasas_reset_fusion(instance->host,
4780 						MFI_IO_TIMEOUT_OCR);
4781 			mutex_lock(&instance->reset_mutex);
4782 			break;
4783 		case KILL_ADAPTER:
4784 			megaraid_sas_kill_hba(instance);
4785 			break;
4786 		case IGNORE_TIMEOUT:
4787 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4788 				__func__, __LINE__);
4789 			break;
4790 		}
4791 
4792 		break;
4793 
4794 	case DCMD_SUCCESS:
4795 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4796 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4797 				 __func__, ld_count);
4798 
4799 		if (ld_count > instance->fw_supported_vd_count)
4800 			break;
4801 
4802 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4803 
4804 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4805 			if (ci->ldList[ld_index].state != 0) {
4806 				ids = ci->ldList[ld_index].ref.targetId;
4807 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4808 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4809 					dev_info(&instance->pdev->dev,
4810 						 "LD%d: targetID: 0x%03x\n",
4811 						 ld_index, ids);
4812 			}
4813 		}
4814 
4815 		break;
4816 	}
4817 
4818 	if (ret != DCMD_TIMEOUT)
4819 		megasas_return_cmd(instance, cmd);
4820 
4821 	return ret;
4822 }
4823 
4824 /**
4825  * megasas_ld_list_query -	Returns FW's ld_list structure
4826  * @instance:				Adapter soft state
4827  * @query_type:				ld_list structure type
4828  *
4829  * Issues an internal command (DCMD) to get the FW's LD target ID
4830  * list structure.  This information is mainly used to find out the LDs
4831  * exposed by the FW.
4832  */
4833 static int
4834 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4835 {
4836 	int ret = 0, ld_index = 0, ids = 0;
4837 	struct megasas_cmd *cmd;
4838 	struct megasas_dcmd_frame *dcmd;
4839 	struct MR_LD_TARGETID_LIST *ci;
4840 	dma_addr_t ci_h = 0;
4841 	u32 tgtid_count;
4842 
4843 	ci = instance->ld_targetid_list_buf;
4844 	ci_h = instance->ld_targetid_list_buf_h;
4845 
4846 	cmd = megasas_get_cmd(instance);
4847 
4848 	if (!cmd) {
4849 		dev_warn(&instance->pdev->dev,
4850 		         "megasas_ld_list_query: Failed to get cmd\n");
4851 		return -ENOMEM;
4852 	}
4853 
4854 	dcmd = &cmd->frame->dcmd;
4855 
4856 	memset(ci, 0, sizeof(*ci));
4857 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4858 
4859 	dcmd->mbox.b[0] = query_type;
4860 	if (instance->supportmax256vd)
4861 		dcmd->mbox.b[2] = 1;
4862 
4863 	dcmd->cmd = MFI_CMD_DCMD;
4864 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4865 	dcmd->sge_count = 1;
4866 	dcmd->flags = MFI_FRAME_DIR_READ;
4867 	dcmd->timeout = 0;
4868 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4869 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4870 	dcmd->pad_0  = 0;
4871 
4872 	megasas_set_dma_settings(instance, dcmd, ci_h,
4873 				 sizeof(struct MR_LD_TARGETID_LIST));
4874 
4875 	if ((instance->adapter_type != MFI_SERIES) &&
4876 	    !instance->mask_interrupts)
4877 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4878 	else
4879 		ret = megasas_issue_polled(instance, cmd);
4880 
4881 	switch (ret) {
4882 	case DCMD_FAILED:
4883 		dev_info(&instance->pdev->dev,
4884 			"DCMD not supported by firmware - %s %d\n",
4885 				__func__, __LINE__);
4886 		ret = megasas_get_ld_list(instance);
4887 		break;
4888 	case DCMD_TIMEOUT:
4889 		switch (dcmd_timeout_ocr_possible(instance)) {
4890 		case INITIATE_OCR:
4891 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4892 			/*
4893 			 * DCMD failed from AEN path.
4894 			 * AEN path already hold reset_mutex to avoid PCI access
4895 			 * while OCR is in progress.
4896 			 */
4897 			mutex_unlock(&instance->reset_mutex);
4898 			megasas_reset_fusion(instance->host,
4899 						MFI_IO_TIMEOUT_OCR);
4900 			mutex_lock(&instance->reset_mutex);
4901 			break;
4902 		case KILL_ADAPTER:
4903 			megaraid_sas_kill_hba(instance);
4904 			break;
4905 		case IGNORE_TIMEOUT:
4906 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4907 				__func__, __LINE__);
4908 			break;
4909 		}
4910 
4911 		break;
4912 	case DCMD_SUCCESS:
4913 		tgtid_count = le32_to_cpu(ci->count);
4914 
4915 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4916 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4917 				 __func__, tgtid_count);
4918 
4919 		if ((tgtid_count > (instance->fw_supported_vd_count)))
4920 			break;
4921 
4922 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4923 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4924 			ids = ci->targetId[ld_index];
4925 			instance->ld_ids[ids] = ci->targetId[ld_index];
4926 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4927 				dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4928 					 ld_index, ci->targetId[ld_index]);
4929 		}
4930 
4931 		break;
4932 	}
4933 
4934 	if (ret != DCMD_TIMEOUT)
4935 		megasas_return_cmd(instance, cmd);
4936 
4937 	return ret;
4938 }
4939 
4940 /**
4941  * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
4942  * dcmd.mbox              - reserved
4943  * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
4944  * Desc:    This DCMD will return the combined device list
4945  * Status:  MFI_STAT_OK - List returned successfully
4946  *          MFI_STAT_INVALID_CMD - Firmware support for the feature has been
4947  *                                 disabled
4948  * @instance:			Adapter soft state
4949  * @is_probe:			Driver probe check
4950  * Return:			0 if DCMD succeeded
4951  *				 non-zero if failed
4952  */
4953 static int
4954 megasas_host_device_list_query(struct megasas_instance *instance,
4955 			       bool is_probe)
4956 {
4957 	int ret, i, target_id;
4958 	struct megasas_cmd *cmd;
4959 	struct megasas_dcmd_frame *dcmd;
4960 	struct MR_HOST_DEVICE_LIST *ci;
4961 	u32 count;
4962 	dma_addr_t ci_h;
4963 
4964 	ci = instance->host_device_list_buf;
4965 	ci_h = instance->host_device_list_buf_h;
4966 
4967 	cmd = megasas_get_cmd(instance);
4968 
4969 	if (!cmd) {
4970 		dev_warn(&instance->pdev->dev,
4971 			 "%s: failed to get cmd\n",
4972 			 __func__);
4973 		return -ENOMEM;
4974 	}
4975 
4976 	dcmd = &cmd->frame->dcmd;
4977 
4978 	memset(ci, 0, sizeof(*ci));
4979 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4980 
4981 	dcmd->mbox.b[0] = is_probe ? 0 : 1;
4982 	dcmd->cmd = MFI_CMD_DCMD;
4983 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4984 	dcmd->sge_count = 1;
4985 	dcmd->flags = MFI_FRAME_DIR_READ;
4986 	dcmd->timeout = 0;
4987 	dcmd->pad_0 = 0;
4988 	dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4989 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4990 
4991 	megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4992 
4993 	if (!instance->mask_interrupts) {
4994 		ret = megasas_issue_blocked_cmd(instance, cmd,
4995 						MFI_IO_TIMEOUT_SECS);
4996 	} else {
4997 		ret = megasas_issue_polled(instance, cmd);
4998 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4999 	}
5000 
5001 	switch (ret) {
5002 	case DCMD_SUCCESS:
5003 		/* Fill the internal pd_list and ld_ids array based on
5004 		 * targetIds returned by FW
5005 		 */
5006 		count = le32_to_cpu(ci->count);
5007 
5008 		if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
5009 			break;
5010 
5011 		if (megasas_dbg_lvl & LD_PD_DEBUG)
5012 			dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
5013 				 __func__, count);
5014 
5015 		memset(instance->local_pd_list, 0,
5016 		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
5017 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
5018 		for (i = 0; i < count; i++) {
5019 			target_id = le16_to_cpu(ci->host_device_list[i].target_id);
5020 			if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
5021 				instance->local_pd_list[target_id].tid = target_id;
5022 				instance->local_pd_list[target_id].driveType =
5023 						ci->host_device_list[i].scsi_type;
5024 				instance->local_pd_list[target_id].driveState =
5025 						MR_PD_STATE_SYSTEM;
5026 				if (megasas_dbg_lvl & LD_PD_DEBUG)
5027 					dev_info(&instance->pdev->dev,
5028 						 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
5029 						 i, target_id, ci->host_device_list[i].scsi_type);
5030 			} else {
5031 				instance->ld_ids[target_id] = target_id;
5032 				if (megasas_dbg_lvl & LD_PD_DEBUG)
5033 					dev_info(&instance->pdev->dev,
5034 						 "Device %d: LD targetID: 0x%03x\n",
5035 						 i, target_id);
5036 			}
5037 		}
5038 
5039 		memcpy(instance->pd_list, instance->local_pd_list,
5040 		       sizeof(instance->pd_list));
5041 		break;
5042 
5043 	case DCMD_TIMEOUT:
5044 		switch (dcmd_timeout_ocr_possible(instance)) {
5045 		case INITIATE_OCR:
5046 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5047 			mutex_unlock(&instance->reset_mutex);
5048 			megasas_reset_fusion(instance->host,
5049 				MFI_IO_TIMEOUT_OCR);
5050 			mutex_lock(&instance->reset_mutex);
5051 			break;
5052 		case KILL_ADAPTER:
5053 			megaraid_sas_kill_hba(instance);
5054 			break;
5055 		case IGNORE_TIMEOUT:
5056 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5057 				 __func__, __LINE__);
5058 			break;
5059 		}
5060 		break;
5061 	case DCMD_FAILED:
5062 		dev_err(&instance->pdev->dev,
5063 			"%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
5064 			__func__);
5065 		break;
5066 	}
5067 
5068 	if (ret != DCMD_TIMEOUT)
5069 		megasas_return_cmd(instance, cmd);
5070 
5071 	return ret;
5072 }
5073 
5074 /*
5075  * megasas_update_ext_vd_details : Update details w.r.t Extended VD
5076  * instance			 : Controller's instance
5077  */
5078 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
5079 {
5080 	struct fusion_context *fusion;
5081 	u32 ventura_map_sz = 0;
5082 
5083 	fusion = instance->ctrl_context;
5084 	/* For MFI based controllers return dummy success */
5085 	if (!fusion)
5086 		return;
5087 
5088 	instance->supportmax256vd =
5089 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
5090 	/* Below is additional check to address future FW enhancement */
5091 	if (instance->ctrl_info_buf->max_lds > 64)
5092 		instance->supportmax256vd = 1;
5093 
5094 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
5095 					* MEGASAS_MAX_DEV_PER_CHANNEL;
5096 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
5097 					* MEGASAS_MAX_DEV_PER_CHANNEL;
5098 	if (instance->supportmax256vd) {
5099 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
5100 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5101 	} else {
5102 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5103 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5104 	}
5105 
5106 	dev_info(&instance->pdev->dev,
5107 		"FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
5108 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
5109 		instance->ctrl_info_buf->max_lds);
5110 
5111 	if (instance->max_raid_mapsize) {
5112 		ventura_map_sz = instance->max_raid_mapsize *
5113 						MR_MIN_MAP_SIZE; /* 64k */
5114 		fusion->current_map_sz = ventura_map_sz;
5115 		fusion->max_map_sz = ventura_map_sz;
5116 	} else {
5117 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
5118 					(sizeof(struct MR_LD_SPAN_MAP) *
5119 					(instance->fw_supported_vd_count - 1));
5120 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
5121 
5122 		fusion->max_map_sz =
5123 			max(fusion->old_map_sz, fusion->new_map_sz);
5124 
5125 		if (instance->supportmax256vd)
5126 			fusion->current_map_sz = fusion->new_map_sz;
5127 		else
5128 			fusion->current_map_sz = fusion->old_map_sz;
5129 	}
5130 	/* irrespective of FW raid maps, driver raid map is constant */
5131 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
5132 }
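/*
 * Illustrative sizing sketch (not part of the driver logic above): on
 * Ventura and later controllers the firmware reports the RAID map size in
 * units of MR_MIN_MAP_SIZE (64K). With a hypothetical max_raid_mapsize of
 * 4, the computation in megasas_update_ext_vd_details() would give:
 *
 *	ventura_map_sz = 4 * MR_MIN_MAP_SIZE;	(4 * 64K = 256K)
 *
 * The real max_raid_mapsize value is read from scratch_pad_2 in
 * megasas_init_fw().
 */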
5133 
5134 /*
5135  * dcmd.opcode                - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
5136  * dcmd.hdr.length            - number of bytes to read
5137  * dcmd.sge                   - Ptr to MR_SNAPDUMP_PROPERTIES
5138  * Desc:			 Fill in snapdump properties
5139  * Status:			 MFI_STAT_OK- Command successful
5140  */
5141 void megasas_get_snapdump_properties(struct megasas_instance *instance)
5142 {
5143 	int ret = 0;
5144 	struct megasas_cmd *cmd;
5145 	struct megasas_dcmd_frame *dcmd;
5146 	struct MR_SNAPDUMP_PROPERTIES *ci;
5147 	dma_addr_t ci_h = 0;
5148 
5149 	ci = instance->snapdump_prop;
5150 	ci_h = instance->snapdump_prop_h;
5151 
5152 	if (!ci)
5153 		return;
5154 
5155 	cmd = megasas_get_cmd(instance);
5156 
5157 	if (!cmd) {
5158 		dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
5159 		return;
5160 	}
5161 
5162 	dcmd = &cmd->frame->dcmd;
5163 
5164 	memset(ci, 0, sizeof(*ci));
5165 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5166 
5167 	dcmd->cmd = MFI_CMD_DCMD;
5168 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5169 	dcmd->sge_count = 1;
5170 	dcmd->flags = MFI_FRAME_DIR_READ;
5171 	dcmd->timeout = 0;
5172 	dcmd->pad_0 = 0;
5173 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5174 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5175 
5176 	megasas_set_dma_settings(instance, dcmd, ci_h,
5177 				 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5178 
5179 	if (!instance->mask_interrupts) {
5180 		ret = megasas_issue_blocked_cmd(instance, cmd,
5181 						MFI_IO_TIMEOUT_SECS);
5182 	} else {
5183 		ret = megasas_issue_polled(instance, cmd);
5184 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5185 	}
5186 
5187 	switch (ret) {
5188 	case DCMD_SUCCESS:
5189 		instance->snapdump_wait_time =
5190 			min_t(u8, ci->trigger_min_num_sec_before_ocr,
5191 				MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5192 		break;
5193 
5194 	case DCMD_TIMEOUT:
5195 		switch (dcmd_timeout_ocr_possible(instance)) {
5196 		case INITIATE_OCR:
5197 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5198 			mutex_unlock(&instance->reset_mutex);
5199 			megasas_reset_fusion(instance->host,
5200 				MFI_IO_TIMEOUT_OCR);
5201 			mutex_lock(&instance->reset_mutex);
5202 			break;
5203 		case KILL_ADAPTER:
5204 			megaraid_sas_kill_hba(instance);
5205 			break;
5206 		case IGNORE_TIMEOUT:
5207 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5208 				__func__, __LINE__);
5209 			break;
5210 		}
5211 	}
5212 
5213 	if (ret != DCMD_TIMEOUT)
5214 		megasas_return_cmd(instance, cmd);
5215 }
5216 
5217 /**
5218  * megasas_get_ctrl_info -	Returns FW's controller structure
5219  * @instance:				Adapter soft state
5220  *
5221  * Issues an internal command (DCMD) to get the FW's controller structure.
5222  * This information is mainly used to find out the maximum IO transfer per
5223  * command supported by the FW.
5224  */
5225 int
5226 megasas_get_ctrl_info(struct megasas_instance *instance)
5227 {
5228 	int ret = 0;
5229 	struct megasas_cmd *cmd;
5230 	struct megasas_dcmd_frame *dcmd;
5231 	struct megasas_ctrl_info *ci;
5232 	dma_addr_t ci_h = 0;
5233 
5234 	ci = instance->ctrl_info_buf;
5235 	ci_h = instance->ctrl_info_buf_h;
5236 
5237 	cmd = megasas_get_cmd(instance);
5238 
5239 	if (!cmd) {
5240 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5241 		return -ENOMEM;
5242 	}
5243 
5244 	dcmd = &cmd->frame->dcmd;
5245 
5246 	memset(ci, 0, sizeof(*ci));
5247 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5248 
5249 	dcmd->cmd = MFI_CMD_DCMD;
5250 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5251 	dcmd->sge_count = 1;
5252 	dcmd->flags = MFI_FRAME_DIR_READ;
5253 	dcmd->timeout = 0;
5254 	dcmd->pad_0 = 0;
5255 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5256 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5257 	dcmd->mbox.b[0] = 1;
5258 
5259 	megasas_set_dma_settings(instance, dcmd, ci_h,
5260 				 sizeof(struct megasas_ctrl_info));
5261 
5262 	if ((instance->adapter_type != MFI_SERIES) &&
5263 	    !instance->mask_interrupts) {
5264 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5265 	} else {
5266 		ret = megasas_issue_polled(instance, cmd);
5267 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5268 	}
5269 
5270 	switch (ret) {
5271 	case DCMD_SUCCESS:
5272 		/* Save required controller information in
5273 		 * CPU endianness format.
5274 		 */
5275 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5276 		le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5277 		le32_to_cpus((u32 *)&ci->adapterOperations2);
5278 		le32_to_cpus((u32 *)&ci->adapterOperations3);
5279 		le16_to_cpus((u16 *)&ci->adapter_operations4);
5280 		le32_to_cpus((u32 *)&ci->adapter_operations5);
5281 
5282 		/* Update the latest Ext VD info.
5283 		 * From Init path, store current firmware details.
5284 		 * From OCR path, detect any firmware property changes in case
5285 		 * of a firmware upgrade without a system reboot.
5286 		 */
5287 		megasas_update_ext_vd_details(instance);
5288 		instance->support_seqnum_jbod_fp =
5289 			ci->adapterOperations3.useSeqNumJbodFP;
5290 		instance->support_morethan256jbod =
5291 			ci->adapter_operations4.support_pd_map_target_id;
5292 		instance->support_nvme_passthru =
5293 			ci->adapter_operations4.support_nvme_passthru;
5294 		instance->support_pci_lane_margining =
5295 			ci->adapter_operations5.support_pci_lane_margining;
5296 		instance->task_abort_tmo = ci->TaskAbortTO;
5297 		instance->max_reset_tmo = ci->MaxResetTO;
5298 
5299 		/* Check whether controller is iMR or MR */
5300 		instance->is_imr = (ci->memory_size ? 0 : 1);
5301 
5302 		instance->snapdump_wait_time =
5303 			(ci->properties.on_off_properties2.enable_snap_dump ?
5304 			 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5305 
5306 		instance->enable_fw_dev_list =
5307 			ci->properties.on_off_properties2.enable_fw_dev_list;
5308 
5309 		dev_info(&instance->pdev->dev,
5310 			"controller type\t: %s(%dMB)\n",
5311 			instance->is_imr ? "iMR" : "MR",
5312 			le16_to_cpu(ci->memory_size));
5313 
5314 		instance->disableOnlineCtrlReset =
5315 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
5316 		instance->secure_jbod_support =
5317 			ci->adapterOperations3.supportSecurityonJBOD;
5318 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5319 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5320 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5321 			instance->secure_jbod_support ? "Yes" : "No");
5322 		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5323 			 instance->support_nvme_passthru ? "Yes" : "No");
5324 		dev_info(&instance->pdev->dev,
5325 			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5326 			 instance->task_abort_tmo, instance->max_reset_tmo);
5327 		dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5328 			 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5329 		dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5330 			 instance->support_pci_lane_margining ? "Yes" : "No");
5331 
5332 		break;
5333 
5334 	case DCMD_TIMEOUT:
5335 		switch (dcmd_timeout_ocr_possible(instance)) {
5336 		case INITIATE_OCR:
5337 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5338 			mutex_unlock(&instance->reset_mutex);
5339 			megasas_reset_fusion(instance->host,
5340 				MFI_IO_TIMEOUT_OCR);
5341 			mutex_lock(&instance->reset_mutex);
5342 			break;
5343 		case KILL_ADAPTER:
5344 			megaraid_sas_kill_hba(instance);
5345 			break;
5346 		case IGNORE_TIMEOUT:
5347 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5348 				__func__, __LINE__);
5349 			break;
5350 		}
5351 		break;
5352 	case DCMD_FAILED:
5353 		megaraid_sas_kill_hba(instance);
5354 		break;
5355 
5356 	}
5357 
5358 	if (ret != DCMD_TIMEOUT)
5359 		megasas_return_cmd(instance, cmd);
5360 
5361 	return ret;
5362 }
5363 
5364 /*
5365  * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
5366  *					to firmware
5367  *
5368  * @instance:				Adapter soft state
5369  * @crash_buf_state:			tell FW to turn ON/OFF crash dump feature:
5370  *					MR_CRASH_BUF_TURN_OFF = 0
5371  *					MR_CRASH_BUF_TURN_ON = 1
5372  * @return:				0 on success, non-zero on failure.
5373  * Issues an internal command (DCMD) to set parameters for crash dump feature.
5374  * Driver will send address of crash dump DMA buffer and set mbox to tell FW
5375  * that driver supports crash dump feature. This DCMD will be sent only if
5376  * crash dump feature is supported by the FW.
5377  *
5378  */
5379 int megasas_set_crash_dump_params(struct megasas_instance *instance,
5380 	u8 crash_buf_state)
5381 {
5382 	int ret = 0;
5383 	struct megasas_cmd *cmd;
5384 	struct megasas_dcmd_frame *dcmd;
5385 
5386 	cmd = megasas_get_cmd(instance);
5387 
5388 	if (!cmd) {
5389 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5390 		return -ENOMEM;
5391 	}
5392 
5393 
5394 	dcmd = &cmd->frame->dcmd;
5395 
5396 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5397 	dcmd->mbox.b[0] = crash_buf_state;
5398 	dcmd->cmd = MFI_CMD_DCMD;
5399 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5400 	dcmd->sge_count = 1;
5401 	dcmd->flags = MFI_FRAME_DIR_NONE;
5402 	dcmd->timeout = 0;
5403 	dcmd->pad_0 = 0;
5404 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5405 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5406 
5407 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5408 				 CRASH_DMA_BUF_SIZE);
5409 
5410 	if ((instance->adapter_type != MFI_SERIES) &&
5411 	    !instance->mask_interrupts)
5412 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5413 	else
5414 		ret = megasas_issue_polled(instance, cmd);
5415 
5416 	if (ret == DCMD_TIMEOUT) {
5417 		switch (dcmd_timeout_ocr_possible(instance)) {
5418 		case INITIATE_OCR:
5419 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5420 			megasas_reset_fusion(instance->host,
5421 					MFI_IO_TIMEOUT_OCR);
5422 			break;
5423 		case KILL_ADAPTER:
5424 			megaraid_sas_kill_hba(instance);
5425 			break;
5426 		case IGNORE_TIMEOUT:
5427 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5428 				__func__, __LINE__);
5429 			break;
5430 		}
5431 	} else
5432 		megasas_return_cmd(instance, cmd);
5433 
5434 	return ret;
5435 }
5436 
5437 /**
5438  * megasas_issue_init_mfi -	Initializes the FW
5439  * @instance:		Adapter soft state
5440  *
5441  * Issues the INIT MFI cmd
5442  */
5443 static int
5444 megasas_issue_init_mfi(struct megasas_instance *instance)
5445 {
5446 	__le32 context;
5447 	struct megasas_cmd *cmd;
5448 	struct megasas_init_frame *init_frame;
5449 	struct megasas_init_queue_info *initq_info;
5450 	dma_addr_t init_frame_h;
5451 	dma_addr_t initq_info_h;
5452 
5453 	/*
5454 	 * Prepare an init frame. Note that the init frame points to the queue
5455 	 * info structure. Each frame has an SGL allocated after the first 64
5456 	 * bytes. For this frame - since we don't need any SGL - we use the
5457 	 * SGL's space as the queue info structure.
5458 	 *
5459 	 * We will not get a NULL command below. We just created the pool.
5460 	 */
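	/*
	 * Illustrative frame layout for this command (derived from the
	 * offsets used below, not an authoritative MFI layout description):
	 *
	 *	cmd->frame_phys_addr + 0  : struct megasas_init_frame
	 *	cmd->frame_phys_addr + 64 : struct megasas_init_queue_info
	 *	                            (reuses the SGL area of the frame)
	 */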
5461 	cmd = megasas_get_cmd(instance);
5462 
5463 	init_frame = (struct megasas_init_frame *)cmd->frame;
5464 	initq_info = (struct megasas_init_queue_info *)
5465 		((unsigned long)init_frame + 64);
5466 
5467 	init_frame_h = cmd->frame_phys_addr;
5468 	initq_info_h = init_frame_h + 64;
5469 
5470 	context = init_frame->context;
5471 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5472 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5473 	init_frame->context = context;
5474 
5475 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5476 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5477 
5478 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5479 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5480 
5481 	init_frame->cmd = MFI_CMD_INIT;
5482 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5483 	init_frame->queue_info_new_phys_addr_lo =
5484 		cpu_to_le32(lower_32_bits(initq_info_h));
5485 	init_frame->queue_info_new_phys_addr_hi =
5486 		cpu_to_le32(upper_32_bits(initq_info_h));
5487 
5488 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5489 
5490 	/*
5491 	 * disable the intr before firing the init frame to FW
5492 	 */
5493 	instance->instancet->disable_intr(instance);
5494 
5495 	/*
5496 	 * Issue the init frame in polled mode
5497 	 */
5498 
5499 	if (megasas_issue_polled(instance, cmd)) {
5500 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5501 		megasas_return_cmd(instance, cmd);
5502 		goto fail_fw_init;
5503 	}
5504 
5505 	megasas_return_cmd(instance, cmd);
5506 
5507 	return 0;
5508 
5509 fail_fw_init:
5510 	return -EINVAL;
5511 }
5512 
5513 static u32
5514 megasas_init_adapter_mfi(struct megasas_instance *instance)
5515 {
5516 	u32 context_sz;
5517 	u32 reply_q_sz;
5518 
5519 	/*
5520 	 * Get various operational parameters from status register
5521 	 */
5522 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5523 	/*
5524 	 * Reduce the max supported cmds by 1. This is to ensure that the
5525 	 * reply_q_sz (1 more than the max cmd that driver may send)
5526 	 * does not exceed max cmds that the FW can support
5527 	 */
5528 	instance->max_fw_cmds = instance->max_fw_cmds-1;
5529 	instance->max_mfi_cmds = instance->max_fw_cmds;
5530 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5531 					0x10;
5532 	/*
5533 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5534 	 * are reserved for IOCTL + driver's internal DCMDs.
5535 	 */
5536 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5537 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5538 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5539 			MEGASAS_SKINNY_INT_CMDS);
5540 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5541 	} else {
5542 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5543 			MEGASAS_INT_CMDS);
5544 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5545 	}
5546 
5547 	instance->cur_can_queue = instance->max_scsi_cmds;
5548 	/*
5549 	 * Create a pool of commands
5550 	 */
5551 	if (megasas_alloc_cmds(instance))
5552 		goto fail_alloc_cmds;
5553 
5554 	/*
5555 	 * Allocate memory for reply queue. Length of reply queue should
5556 	 * be _one_ more than the maximum commands handled by the firmware.
5557 	 *
5558 	 * Note: When FW completes commands, it places the corresponding context
5559 	 * values in this circular reply queue. This circular queue is a fairly
5560 	 * typical producer-consumer queue. FW is the producer (of completed
5561 	 * commands) and the driver is the consumer.
5562 	 */
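	/*
	 * Sizing sketch (hypothetical numbers): each reply queue entry is a
	 * 32-bit context, so with e.g. max_fw_cmds = 1008 the allocation
	 * below would be reply_q_sz = 4 * (1008 + 1) = 4036 bytes.
	 */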
5563 	context_sz = sizeof(u32);
5564 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5565 
5566 	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5567 			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5568 
5569 	if (!instance->reply_queue) {
5570 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5571 		goto fail_reply_queue;
5572 	}
5573 
5574 	if (megasas_issue_init_mfi(instance))
5575 		goto fail_fw_init;
5576 
5577 	if (megasas_get_ctrl_info(instance)) {
5578 		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
5579 			"Failed from %s %d\n", instance->unique_id,
5580 			__func__, __LINE__);
5581 		goto fail_fw_init;
5582 	}
5583 
5584 	instance->fw_support_ieee = 0;
5585 	instance->fw_support_ieee =
5586 		(instance->instancet->read_fw_status_reg(instance) &
5587 		0x04000000);
5588 
5589 	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
5590 			instance->fw_support_ieee);
5591 
5592 	if (instance->fw_support_ieee)
5593 		instance->flag_ieee = 1;
5594 
5595 	return 0;
5596 
5597 fail_fw_init:
5598 
5599 	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5600 			    instance->reply_queue, instance->reply_queue_h);
5601 fail_reply_queue:
5602 	megasas_free_cmds(instance);
5603 
5604 fail_alloc_cmds:
5605 	return 1;
5606 }
5607 
5608 static
5609 void megasas_setup_irq_poll(struct megasas_instance *instance)
5610 {
5611 	struct megasas_irq_context *irq_ctx;
5612 	u32 count, i;
5613 
5614 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5615 
5616 	/* Initialize IRQ poll */
5617 	for (i = 0; i < count; i++) {
5618 		irq_ctx = &instance->irq_context[i];
5619 		irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5620 		irq_ctx->irq_poll_scheduled = false;
5621 		irq_poll_init(&irq_ctx->irqpoll,
5622 			      instance->threshold_reply_count,
5623 			      megasas_irqpoll);
5624 	}
5625 }
5626 
5627 /*
5628  * megasas_setup_irqs_ioapic -		register legacy interrupts.
5629  * @instance:				Adapter soft state
5630  *
5631  * Do not enable interrupt, only setup ISRs.
5632  *
5633  * Return 0 on success.
5634  */
5635 static int
5636 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5637 {
5638 	struct pci_dev *pdev;
5639 
5640 	pdev = instance->pdev;
5641 	instance->irq_context[0].instance = instance;
5642 	instance->irq_context[0].MSIxIndex = 0;
5643 	snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
5644 		"megasas", instance->host->host_no);
5645 	if (request_irq(pci_irq_vector(pdev, 0),
5646 			instance->instancet->service_isr, IRQF_SHARED,
5647 			instance->irq_context->name, &instance->irq_context[0])) {
5648 		dev_err(&instance->pdev->dev,
5649 				"Failed to register IRQ from %s %d\n",
5650 				__func__, __LINE__);
5651 		return -1;
5652 	}
5653 	instance->perf_mode = MR_LATENCY_PERF_MODE;
5654 	instance->low_latency_index_start = 0;
5655 	return 0;
5656 }
5657 
5658 /**
5659  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5660  * @instance:				Adapter soft state
5661  * @is_probe:				Driver probe check
5662  *
5663  * Do not enable interrupt, only setup ISRs.
5664  *
5665  * Return 0 on success.
5666  */
5667 static int
5668 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5669 {
5670 	int i, j;
5671 	struct pci_dev *pdev;
5672 
5673 	pdev = instance->pdev;
5674 
5675 	/* Try MSI-x */
5676 	for (i = 0; i < instance->msix_vectors; i++) {
5677 		instance->irq_context[i].instance = instance;
5678 		instance->irq_context[i].MSIxIndex = i;
5679 		snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
5680 			"megasas", instance->host->host_no, i);
5681 		if (request_irq(pci_irq_vector(pdev, i),
5682 			instance->instancet->service_isr, 0, instance->irq_context[i].name,
5683 			&instance->irq_context[i])) {
5684 			dev_err(&instance->pdev->dev,
5685 				"Failed to register IRQ for vector %d.\n", i);
5686 			for (j = 0; j < i; j++) {
5687 				if (j < instance->low_latency_index_start)
5688 					irq_set_affinity_hint(
5689 						pci_irq_vector(pdev, j), NULL);
5690 				free_irq(pci_irq_vector(pdev, j),
5691 					 &instance->irq_context[j]);
5692 			}
5693 			/* Retry irq register for IO_APIC*/
5694 			instance->msix_vectors = 0;
5695 			instance->msix_load_balance = false;
5696 			if (is_probe) {
5697 				pci_free_irq_vectors(instance->pdev);
5698 				return megasas_setup_irqs_ioapic(instance);
5699 			} else {
5700 				return -1;
5701 			}
5702 		}
5703 	}
5704 
5705 	return 0;
5706 }
5707 
5708 /*
5709  * megasas_destroy_irqs-		unregister interrupts.
5710  * @instance:				Adapter soft state
5711  * return:				void
5712  */
5713 static void
5714 megasas_destroy_irqs(struct megasas_instance *instance) {
5715 
5716 	int i;
5717 	int count;
5718 	struct megasas_irq_context *irq_ctx;
5719 
5720 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5721 	if (instance->adapter_type != MFI_SERIES) {
5722 		for (i = 0; i < count; i++) {
5723 			irq_ctx = &instance->irq_context[i];
5724 			irq_poll_disable(&irq_ctx->irqpoll);
5725 		}
5726 	}
5727 
5728 	if (instance->msix_vectors)
5729 		for (i = 0; i < instance->msix_vectors; i++) {
5730 			if (i < instance->low_latency_index_start)
5731 				irq_set_affinity_hint(
5732 				    pci_irq_vector(instance->pdev, i), NULL);
5733 			free_irq(pci_irq_vector(instance->pdev, i),
5734 				 &instance->irq_context[i]);
5735 		}
5736 	else
5737 		free_irq(pci_irq_vector(instance->pdev, 0),
5738 			 &instance->irq_context[0]);
5739 }
5740 
5741 /**
5742  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5743  * @instance:				Adapter soft state
5744  *
5745  * Return:				void
5746  */
5747 void
5748 megasas_setup_jbod_map(struct megasas_instance *instance)
5749 {
5750 	int i;
5751 	struct fusion_context *fusion = instance->ctrl_context;
5752 	u32 pd_seq_map_sz;
5753 
5754 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5755 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
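	/*
	 * Sizing note on pd_seq_map_sz (explanatory only): the "- 1" above
	 * accounts for MR_PD_CFG_SEQ_NUM_SYNC already carrying one
	 * MR_PD_CFG_SEQ element, so the buffer holds one header plus
	 * MAX_PHYSICAL_DEVICES sequence entries. This assumes the usual
	 * one-element trailing array layout of MR_PD_CFG_SEQ_NUM_SYNC.
	 */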
5756 
5757 	instance->use_seqnum_jbod_fp =
5758 		instance->support_seqnum_jbod_fp;
5759 	if (reset_devices || !fusion ||
5760 		!instance->support_seqnum_jbod_fp) {
5761 		dev_info(&instance->pdev->dev,
5762 			"JBOD sequence map is disabled %s %d\n",
5763 			__func__, __LINE__);
5764 		instance->use_seqnum_jbod_fp = false;
5765 		return;
5766 	}
5767 
5768 	if (fusion->pd_seq_sync[0])
5769 		goto skip_alloc;
5770 
5771 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5772 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5773 			(&instance->pdev->dev, pd_seq_map_sz,
5774 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5775 		if (!fusion->pd_seq_sync[i]) {
5776 			dev_err(&instance->pdev->dev,
5777 				"Failed to allocate memory from %s %d\n",
5778 				__func__, __LINE__);
5779 			if (i == 1) {
5780 				dma_free_coherent(&instance->pdev->dev,
5781 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5782 					fusion->pd_seq_phys[0]);
5783 				fusion->pd_seq_sync[0] = NULL;
5784 			}
5785 			instance->use_seqnum_jbod_fp = false;
5786 			return;
5787 		}
5788 	}
5789 
5790 skip_alloc:
5791 	if (!megasas_sync_pd_seq_num(instance, false) &&
5792 		!megasas_sync_pd_seq_num(instance, true))
5793 		instance->use_seqnum_jbod_fp = true;
5794 	else
5795 		instance->use_seqnum_jbod_fp = false;
5796 }
5797 
5798 static void megasas_setup_reply_map(struct megasas_instance *instance)
5799 {
5800 	const struct cpumask *mask;
5801 	unsigned int queue, cpu, low_latency_index_start;
5802 
5803 	low_latency_index_start = instance->low_latency_index_start;
5804 
5805 	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5806 		mask = pci_irq_get_affinity(instance->pdev, queue);
5807 		if (!mask)
5808 			goto fallback;
5809 
5810 		for_each_cpu(cpu, mask)
5811 			instance->reply_map[cpu] = queue;
5812 	}
5813 	return;
5814 
5815 fallback:
5816 	queue = low_latency_index_start;
5817 	for_each_possible_cpu(cpu) {
5818 		instance->reply_map[cpu] = queue;
5819 		if (queue == (instance->msix_vectors - 1))
5820 			queue = low_latency_index_start;
5821 		else
5822 			queue++;
5823 	}
5824 }
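/*
 * Fallback mapping sketch for megasas_setup_reply_map(), with illustrative
 * numbers only: given low_latency_index_start = 1 and msix_vectors = 4, the
 * round-robin fallback assigns CPU0->queue1, CPU1->queue2, CPU2->queue3,
 * CPU3->queue1 and so on; queues below low_latency_index_start are left out
 * of the per-CPU reply map.
 */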
5825 
5826 /**
5827  * megasas_get_device_list -	Get the PD and LD device list from FW.
5828  * @instance:			Adapter soft state
5829  * @return:			Success or failure
5830  *
5831  * Issue DCMDs to Firmware to get the PD and LD list.
5832  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5833  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5834  */
5835 static
5836 int megasas_get_device_list(struct megasas_instance *instance)
5837 {
5838 	memset(instance->pd_list, 0,
5839 	       (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5840 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5841 
5842 	if (instance->enable_fw_dev_list) {
5843 		if (megasas_host_device_list_query(instance, true))
5844 			return FAILED;
5845 	} else {
5846 		if (megasas_get_pd_list(instance) < 0) {
5847 			dev_err(&instance->pdev->dev, "failed to get PD list\n");
5848 			return FAILED;
5849 		}
5850 
5851 		if (megasas_ld_list_query(instance,
5852 					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5853 			dev_err(&instance->pdev->dev, "failed to get LD list\n");
5854 			return FAILED;
5855 		}
5856 	}
5857 
5858 	return SUCCESS;
5859 }
5860 
5861 /**
5862  * megasas_set_high_iops_queue_affinity_hint -	Set affinity hint for high IOPS queues
5863  * @instance:					Adapter soft state
5864  * return:					void
5865  */
5866 static inline void
5867 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5868 {
5869 	int i;
5870 	int local_numa_node;
5871 
5872 	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5873 		local_numa_node = dev_to_node(&instance->pdev->dev);
5874 
5875 		for (i = 0; i < instance->low_latency_index_start; i++)
5876 			irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5877 				cpumask_of_node(local_numa_node));
5878 	}
5879 }
5880 
5881 static int
5882 __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5883 {
5884 	int i, irq_flags;
5885 	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5886 	struct irq_affinity *descp = &desc;
5887 
5888 	irq_flags = PCI_IRQ_MSIX;
5889 
5890 	if (instance->smp_affinity_enable)
5891 		irq_flags |= PCI_IRQ_AFFINITY;
5892 	else
5893 		descp = NULL;
5894 
5895 	i = pci_alloc_irq_vectors_affinity(instance->pdev,
5896 		instance->low_latency_index_start,
5897 		instance->msix_vectors, irq_flags, descp);
5898 
5899 	return i;
5900 }
5901 
5902 /**
5903  * megasas_alloc_irq_vectors -	Allocate IRQ vectors/enable MSI-x vectors
5904  * @instance:			Adapter soft state
5905  * return:			void
5906  */
5907 static void
5908 megasas_alloc_irq_vectors(struct megasas_instance *instance)
5909 {
5910 	int i;
5911 	unsigned int num_msix_req;
5912 
5913 	i = __megasas_alloc_irq_vectors(instance);
5914 
5915 	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5916 	    (i != instance->msix_vectors)) {
5917 		if (instance->msix_vectors)
5918 			pci_free_irq_vectors(instance->pdev);
5919 		/* Disable Balanced IOPS mode and try realloc vectors */
5920 		instance->perf_mode = MR_LATENCY_PERF_MODE;
5921 		instance->low_latency_index_start = 1;
5922 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5923 
5924 		instance->msix_vectors = min(num_msix_req,
5925 				instance->msix_vectors);
5926 
5927 		i = __megasas_alloc_irq_vectors(instance);
5928 
5929 	}
5930 
5931 	dev_info(&instance->pdev->dev,
5932 		"requested/available msix %d/%d\n", instance->msix_vectors, i);
5933 
5934 	if (i > 0)
5935 		instance->msix_vectors = i;
5936 	else
5937 		instance->msix_vectors = 0;
5938 
5939 	if (instance->smp_affinity_enable)
5940 		megasas_set_high_iops_queue_affinity_hint(instance);
5941 }
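/*
 * Allocation fallback sketch for megasas_alloc_irq_vectors(), hypothetical
 * numbers: if balanced mode requested 72 vectors but
 * pci_alloc_irq_vectors_affinity() granted fewer, the vectors are released,
 * perf_mode drops to MR_LATENCY_PERF_MODE with a single pre-vector, and the
 * allocation is retried with at most num_online_cpus() + 1 vectors.
 */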
5942 
5943 /**
5944  * megasas_init_fw -	Initializes the FW
5945  * @instance:		Adapter soft state
5946  *
5947  * This is the main function for initializing firmware
5948  */
5949 
5950 static int megasas_init_fw(struct megasas_instance *instance)
5951 {
5952 	u32 max_sectors_1;
5953 	u32 max_sectors_2, tmp_sectors, msix_enable;
5954 	u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5955 	resource_size_t base_addr;
5956 	void *base_addr_phys;
5957 	struct megasas_ctrl_info *ctrl_info = NULL;
5958 	unsigned long bar_list;
5959 	int i, j, loop;
5960 	struct IOV_111 *iovPtr;
5961 	struct fusion_context *fusion;
5962 	bool intr_coalescing;
5963 	unsigned int num_msix_req;
5964 	u16 lnksta, speed;
5965 
5966 	fusion = instance->ctrl_context;
5967 
5968 	/* Find first memory bar */
5969 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5970 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5971 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5972 					 "megasas: LSI")) {
5973 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5974 		return -EBUSY;
5975 	}
5976 
5977 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5978 	instance->reg_set = ioremap(base_addr, 8192);
5979 
5980 	if (!instance->reg_set) {
5981 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5982 		goto fail_ioremap;
5983 	}
5984 
5985 	base_addr_phys = &base_addr;
5986 	dev_printk(KERN_DEBUG, &instance->pdev->dev,
5987 		   "BAR:0x%lx  BAR's base_addr(phys):%pa  mapped virt_addr:0x%p\n",
5988 		   instance->bar, base_addr_phys, instance->reg_set);
5989 
5990 	if (instance->adapter_type != MFI_SERIES)
5991 		instance->instancet = &megasas_instance_template_fusion;
5992 	else {
5993 		switch (instance->pdev->device) {
5994 		case PCI_DEVICE_ID_LSI_SAS1078R:
5995 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5996 			instance->instancet = &megasas_instance_template_ppc;
5997 			break;
5998 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5999 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
6000 			instance->instancet = &megasas_instance_template_gen2;
6001 			break;
6002 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
6003 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
6004 			instance->instancet = &megasas_instance_template_skinny;
6005 			break;
6006 		case PCI_DEVICE_ID_LSI_SAS1064R:
6007 		case PCI_DEVICE_ID_DELL_PERC5:
6008 		default:
6009 			instance->instancet = &megasas_instance_template_xscale;
6010 			instance->pd_list_not_supported = 1;
6011 			break;
6012 		}
6013 	}
6014 
6015 	if (megasas_transition_to_ready(instance, 0)) {
6016 		dev_info(&instance->pdev->dev,
6017 			 "Failed to transition controller to ready from %s!\n",
6018 			 __func__);
6019 		if (instance->adapter_type != MFI_SERIES) {
6020 			status_reg = instance->instancet->read_fw_status_reg(
6021 					instance);
6022 			if (status_reg & MFI_RESET_ADAPTER) {
6023 				if (megasas_adp_reset_wait_for_ready
6024 					(instance, true, 0) == FAILED)
6025 					goto fail_ready_state;
6026 			} else {
6027 				goto fail_ready_state;
6028 			}
6029 		} else {
6030 			atomic_set(&instance->fw_reset_no_pci_access, 1);
6031 			instance->instancet->adp_reset
6032 				(instance, instance->reg_set);
6033 			atomic_set(&instance->fw_reset_no_pci_access, 0);
6034 
6035 			/* waiting for about 30 seconds before retry */
6036 			ssleep(30);
6037 
6038 			if (megasas_transition_to_ready(instance, 0))
6039 				goto fail_ready_state;
6040 		}
6041 
6042 		dev_info(&instance->pdev->dev,
6043 			 "FW restarted successfully from %s!\n",
6044 			 __func__);
6045 	}
6046 
6047 	megasas_init_ctrl_params(instance);
6048 
6049 	if (megasas_set_dma_mask(instance))
6050 		goto fail_ready_state;
6051 
6052 	if (megasas_alloc_ctrl_mem(instance))
6053 		goto fail_alloc_dma_buf;
6054 
6055 	if (megasas_alloc_ctrl_dma_buffers(instance))
6056 		goto fail_alloc_dma_buf;
6057 
6058 	fusion = instance->ctrl_context;
6059 
6060 	if (instance->adapter_type >= VENTURA_SERIES) {
6061 		scratch_pad_2 =
6062 			megasas_readl(instance,
6063 				      &instance->reg_set->outbound_scratch_pad_2);
6064 		instance->max_raid_mapsize = ((scratch_pad_2 >>
6065 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
6066 			MR_MAX_RAID_MAP_SIZE_MASK);
6067 	}
6068 
6069 	instance->enable_sdev_max_qd = enable_sdev_max_qd;
6070 
6071 	switch (instance->adapter_type) {
6072 	case VENTURA_SERIES:
6073 		fusion->pcie_bw_limitation = true;
6074 		break;
6075 	case AERO_SERIES:
6076 		fusion->r56_div_offload = true;
6077 		break;
6078 	default:
6079 		break;
6080 	}
6081 
6082 	/* Check if MSI-X is supported while in ready state */
6083 	msix_enable = (instance->instancet->read_fw_status_reg(instance) &
6084 		       0x4000000) >> 0x1a;
6085 	if (msix_enable && !msix_disable) {
6086 
6087 		scratch_pad_1 = megasas_readl
6088 			(instance, &instance->reg_set->outbound_scratch_pad_1);
6089 		/* Check max MSI-X vectors */
6090 		if (fusion) {
6091 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
6092 				/* Thunderbolt Series*/
6093 				instance->msix_vectors = (scratch_pad_1
6094 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
6095 			} else {
6096 				instance->msix_vectors = ((scratch_pad_1
6097 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
6098 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
6099 
6100 				/*
6101 				 * For Invader series, > 8 MSI-x vectors
6102 				 * supported by FW/HW implies combined
6103 				 * reply queue mode is enabled.
6104 				 * For Ventura series, > 16 MSI-x vectors
6105 				 * supported by FW/HW implies combined
6106 				 * reply queue mode is enabled.
6107 				 */
6108 				switch (instance->adapter_type) {
6109 				case INVADER_SERIES:
6110 					if (instance->msix_vectors > 8)
6111 						instance->msix_combined = true;
6112 					break;
6113 				case AERO_SERIES:
6114 				case VENTURA_SERIES:
6115 					if (instance->msix_vectors > 16)
6116 						instance->msix_combined = true;
6117 					break;
6118 				}
6119 
6120 				if (rdpq_enable)
6121 					instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
6122 								1 : 0;
6123 
6124 				if (instance->adapter_type >= INVADER_SERIES &&
6125 				    !instance->msix_combined) {
6126 					instance->msix_load_balance = true;
6127 					instance->smp_affinity_enable = false;
6128 				}
6129 
6130 				/* Save 1-15 reply post index addresses to local memory
6131 				 * Index 0 is already saved from reg offset
6132 				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6133 				 */
6134 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6135 					instance->reply_post_host_index_addr[loop] =
6136 						(u32 __iomem *)
6137 						((u8 __iomem *)instance->reg_set +
6138 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6139 						+ (loop * 0x10));
6140 				}
6141 			}
6142 
6143 			dev_info(&instance->pdev->dev,
6144 				 "firmware supports msix\t: (%d)",
6145 				 instance->msix_vectors);
6146 			if (msix_vectors)
6147 				instance->msix_vectors = min(msix_vectors,
6148 					instance->msix_vectors);
6149 		} else /* MFI adapters */
6150 			instance->msix_vectors = 1;
6151 
6152 
6153 		/*
6154 		 * For Aero (if some conditions are met), driver will configure a
6155 		 * few additional reply queues with interrupt coalescing enabled.
6156 		 * These queues with interrupt coalescing enabled are called
6157 		 * High IOPS queues and rest of reply queues (based on number of
6158 		 * logical CPUs) are termed as Low latency queues.
6159 		 *
6160 		 * Total Number of reply queues = High IOPS queues + low latency queues
6161 		 *
6162 		 * For rest of fusion adapters, 1 additional reply queue will be
6163 		 * reserved for management commands, rest of reply queues
6164 		 * (based on number of logical CPUs) will be used for IOs and
6165 		 * referenced as IO queues.
6166 		 * Total Number of reply queues = 1 + IO queues
6167 		 *
6168 		 * MFI adapters support a single MSI-x vector, so a single reply
6169 		 * queue will be used for both IO and management commands.
6170 		 */
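		/*
		 * Worked example (hypothetical numbers, assuming the
		 * MR_HIGH_IOPS_QUEUE_COUNT of 8 defined in megaraid_sas.h):
		 * an Aero controller with interrupt coalescing, 64 online
		 * CPUs and the maximum MSI-x count runs in balanced mode,
		 * so num_msix_req = 64 + 8 = 72 reply queues are requested:
		 * 8 high IOPS queues plus 64 low latency queues.
		 */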
6171 
6172 		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6173 								true : false;
6174 		if (intr_coalescing &&
6175 			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6176 			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6177 			instance->perf_mode = MR_BALANCED_PERF_MODE;
6178 		else
6179 			instance->perf_mode = MR_LATENCY_PERF_MODE;
6180 
6181 
6182 		if (instance->adapter_type == AERO_SERIES) {
6183 			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6184 			speed = lnksta & PCI_EXP_LNKSTA_CLS;
6185 
6186 			/*
6187 			 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6188 			 * in latency perf mode and enable R1 PCI bandwidth algorithm
6189 			 */
6190 			if (speed < 0x4) {
6191 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6192 				fusion->pcie_bw_limitation = true;
6193 			}
6194 
6195 			/*
6196 			 * Performance mode settings provided through the module parameter
6197 			 * perf_mode take effect only for:
6198 			 * 1. The Aero family of adapters.
6199 			 * 2. When the user sets the module parameter perf_mode in the range 0-2.
6200 			 */
6201 			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6202 				(perf_mode <= MR_LATENCY_PERF_MODE))
6203 				instance->perf_mode = perf_mode;
6204 			/*
6205 			 * If intr coalescing is not supported by controller FW, then IOPS
6206 			 * and Balanced modes are not feasible.
6207 			 */
6208 			if (!intr_coalescing)
6209 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6210 
6211 		}
6212 
6213 		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6214 			instance->low_latency_index_start =
6215 				MR_HIGH_IOPS_QUEUE_COUNT;
6216 		else
6217 			instance->low_latency_index_start = 1;
6218 
6219 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6220 
6221 		instance->msix_vectors = min(num_msix_req,
6222 				instance->msix_vectors);
6223 
6224 		megasas_alloc_irq_vectors(instance);
6225 		if (!instance->msix_vectors)
6226 			instance->msix_load_balance = false;
6227 	}
6228 	/*
6229 	 * MSI-X host index 0 is common for all adapters.
6230 	 * It is used for all MPT based adapters.
6231 	 */
6232 	if (instance->msix_combined) {
6233 		instance->reply_post_host_index_addr[0] =
6234 				(u32 *)((u8 *)instance->reg_set +
6235 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6236 	} else {
6237 		instance->reply_post_host_index_addr[0] =
6238 			(u32 *)((u8 *)instance->reg_set +
6239 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6240 	}
6241 
6242 	if (!instance->msix_vectors) {
6243 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6244 		if (i < 0)
6245 			goto fail_init_adapter;
6246 	}
6247 
6248 	megasas_setup_reply_map(instance);
6249 
6250 	dev_info(&instance->pdev->dev,
6251 		"current msix/online cpus\t: (%d/%d)\n",
6252 		instance->msix_vectors, (unsigned int)num_online_cpus());
6253 	dev_info(&instance->pdev->dev,
6254 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6255 
6256 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6257 		(unsigned long)instance);
6258 
6259 	/*
6260 	 * Below are the default values for legacy firmware
6261 	 * (non-fusion based controllers).
6262 	 */
6263 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6264 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6265 	/* Get operational params, sge flags, send init cmd to controller */
6266 	if (instance->instancet->init_adapter(instance))
6267 		goto fail_init_adapter;
6268 
6269 	if (instance->adapter_type >= VENTURA_SERIES) {
6270 		scratch_pad_3 =
6271 			megasas_readl(instance,
6272 				      &instance->reg_set->outbound_scratch_pad_3);
6273 		if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6274 			MR_DEFAULT_NVME_PAGE_SHIFT)
6275 			instance->nvme_page_size =
6276 				(1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6277 
6278 		dev_info(&instance->pdev->dev,
6279 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6280 	}
6281 
6282 	if (instance->msix_vectors ?
6283 		megasas_setup_irqs_msix(instance, 1) :
6284 		megasas_setup_irqs_ioapic(instance))
6285 		goto fail_init_adapter;
6286 
6287 	if (instance->adapter_type != MFI_SERIES)
6288 		megasas_setup_irq_poll(instance);
6289 
6290 	instance->instancet->enable_intr(instance);
6291 
6292 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
6293 
6294 	megasas_setup_jbod_map(instance);
6295 
6296 	if (megasas_get_device_list(instance) != SUCCESS) {
6297 		dev_err(&instance->pdev->dev,
6298 			"%s: megasas_get_device_list failed\n",
6299 			__func__);
6300 		goto fail_get_ld_pd_list;
6301 	}
6302 
6303 	/* stream detection initialization */
6304 	if (instance->adapter_type >= VENTURA_SERIES) {
6305 		fusion->stream_detect_by_ld =
6306 			kcalloc(MAX_LOGICAL_DRIVES_EXT,
6307 				sizeof(struct LD_STREAM_DETECT *),
6308 				GFP_KERNEL);
6309 		if (!fusion->stream_detect_by_ld) {
6310 			dev_err(&instance->pdev->dev,
6311 				"unable to allocate stream detection for pool of LDs\n");
6312 			goto fail_get_ld_pd_list;
6313 		}
6314 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6315 			fusion->stream_detect_by_ld[i] =
6316 				kzalloc(sizeof(struct LD_STREAM_DETECT),
6317 				GFP_KERNEL);
6318 			if (!fusion->stream_detect_by_ld[i]) {
6319 				dev_err(&instance->pdev->dev,
6320 					"unable to allocate stream detect by LD\n");
6321 				for (j = 0; j < i; ++j)
6322 					kfree(fusion->stream_detect_by_ld[j]);
6323 				kfree(fusion->stream_detect_by_ld);
6324 				fusion->stream_detect_by_ld = NULL;
6325 				goto fail_get_ld_pd_list;
6326 			}
6327 			fusion->stream_detect_by_ld[i]->mru_bit_map
6328 				= MR_STREAM_BITMAP;
6329 		}
6330 	}
6331 
6332 	/*
6333 	 * Compute the max allowed sectors per IO: The controller info has two
6334 	 * limits on max sectors. Driver should use the minimum of these two.
6335 	 *
6336 	 * 1 << stripe_sz_ops.min = max sectors per strip
6337 	 *
6338 	 * Note that older firmware (< FW ver 30) didn't report the information
6339 	 * needed to calculate max_sectors_1, so the number always ended up as zero.
6340 	 */
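	/*
	 * Worked example with hypothetical controller values: if
	 * stripe_sz_ops.min is 7 (a 64 KB strip, i.e. 128 sectors) and
	 * max_strips_per_io is 42, then max_sectors_1 = 128 * 42 = 5376;
	 * with max_request_size = 8192, the minimum, 5376 sectors, is used.
	 */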
6341 	tmp_sectors = 0;
6342 	ctrl_info = instance->ctrl_info_buf;
6343 
6344 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6345 		le16_to_cpu(ctrl_info->max_strips_per_io);
6346 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6347 
6348 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6349 
6350 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6351 	instance->passive = ctrl_info->cluster.passive;
6352 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6353 	instance->UnevenSpanSupport =
6354 		ctrl_info->adapterOperations2.supportUnevenSpans;
6355 	if (instance->UnevenSpanSupport) {
6356 		struct fusion_context *fusion = instance->ctrl_context;
6357 		if (MR_ValidateMapInfo(instance, instance->map_id))
6358 			fusion->fast_path_io = 1;
6359 		else
6360 			fusion->fast_path_io = 0;
6361 
6362 	}
6363 	if (ctrl_info->host_interface.SRIOV) {
6364 		instance->requestorId = ctrl_info->iov.requestorId;
6365 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6366 			if (!ctrl_info->adapterOperations2.activePassive)
6367 			    instance->PlasmaFW111 = 1;
6368 
6369 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6370 			    instance->PlasmaFW111 ? "1.11" : "new");
6371 
6372 			if (instance->PlasmaFW111) {
6373 			    iovPtr = (struct IOV_111 *)
6374 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
6375 			    instance->requestorId = iovPtr->requestorId;
6376 			}
6377 		}
6378 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6379 			instance->requestorId);
6380 	}
6381 
6382 	instance->crash_dump_fw_support =
6383 		ctrl_info->adapterOperations3.supportCrashDump;
6384 	instance->crash_dump_drv_support =
6385 		(instance->crash_dump_fw_support &&
6386 		instance->crash_dump_buf);
6387 	if (instance->crash_dump_drv_support)
6388 		megasas_set_crash_dump_params(instance,
6389 			MR_CRASH_BUF_TURN_OFF);
6390 
6391 	else {
6392 		if (instance->crash_dump_buf)
6393 			dma_free_coherent(&instance->pdev->dev,
6394 				CRASH_DMA_BUF_SIZE,
6395 				instance->crash_dump_buf,
6396 				instance->crash_dump_h);
6397 		instance->crash_dump_buf = NULL;
6398 	}
6399 
6400 	if (instance->snapdump_wait_time) {
6401 		megasas_get_snapdump_properties(instance);
6402 		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6403 			 instance->snapdump_wait_time);
6404 	}
6405 
6406 	dev_info(&instance->pdev->dev,
6407 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6408 		le16_to_cpu(ctrl_info->pci.vendor_id),
6409 		le16_to_cpu(ctrl_info->pci.device_id),
6410 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6411 		le16_to_cpu(ctrl_info->pci.sub_device_id));
6412 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
6413 		instance->UnevenSpanSupport ? "yes" : "no");
6414 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
6415 		instance->crash_dump_drv_support ? "yes" : "no");
6416 	dev_info(&instance->pdev->dev, "JBOD sequence map	: %s\n",
6417 		instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6418 
6419 	instance->max_sectors_per_req = instance->max_num_sge *
6420 						SGE_BUFFER_SIZE / 512;
6421 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6422 		instance->max_sectors_per_req = tmp_sectors;
6423 
6424 	/* Check for valid throttlequeuedepth module parameter */
6425 	if (throttlequeuedepth &&
6426 			throttlequeuedepth <= instance->max_scsi_cmds)
6427 		instance->throttlequeuedepth = throttlequeuedepth;
6428 	else
6429 		instance->throttlequeuedepth =
6430 				MEGASAS_THROTTLE_QUEUE_DEPTH;
6431 
6432 	if ((resetwaittime < 1) ||
6433 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6434 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
6435 
6436 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6437 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6438 
6439 	/* Launch SR-IOV heartbeat timer */
6440 	if (instance->requestorId) {
6441 		if (!megasas_sriov_start_heartbeat(instance, 1)) {
6442 			megasas_start_timer(instance);
6443 		} else {
6444 			instance->skip_heartbeat_timer_del = 1;
6445 			goto fail_get_ld_pd_list;
6446 		}
6447 	}
6448 
6449 	/*
6450 	 * Create and start a watchdog thread which will monitor the
6451 	 * controller state every 1 sec and trigger OCR when it
6452 	 * enters the fault state
6453 	 */
6454 	if (instance->adapter_type != MFI_SERIES)
6455 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6456 			goto fail_start_watchdog;
6457 
6458 	return 0;
6459 
6460 fail_start_watchdog:
6461 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6462 		del_timer_sync(&instance->sriov_heartbeat_timer);
6463 fail_get_ld_pd_list:
6464 	instance->instancet->disable_intr(instance);
6465 	megasas_destroy_irqs(instance);
6466 fail_init_adapter:
6467 	if (instance->msix_vectors)
6468 		pci_free_irq_vectors(instance->pdev);
6469 	instance->msix_vectors = 0;
6470 fail_alloc_dma_buf:
6471 	megasas_free_ctrl_dma_buffers(instance);
6472 	megasas_free_ctrl_mem(instance);
6473 fail_ready_state:
6474 	iounmap(instance->reg_set);
6475 
6476 fail_ioremap:
6477 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6478 
6479 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6480 		__func__, __LINE__);
6481 	return -EINVAL;
6482 }
6483 
6484 /**
6485  * megasas_release_mfi -	Reverses the FW initialization
6486  * @instance:			Adapter soft state
6487  */
6488 static void megasas_release_mfi(struct megasas_instance *instance)
6489 {
6490 	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
6491 
6492 	if (instance->reply_queue)
6493 		dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6494 			    instance->reply_queue, instance->reply_queue_h);
6495 
6496 	megasas_free_cmds(instance);
6497 
6498 	iounmap(instance->reg_set);
6499 
6500 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6501 }
6502 
6503 /**
6504  * megasas_get_seq_num -	Gets latest event sequence numbers
6505  * @instance:			Adapter soft state
6506  * @eli:			FW event log sequence numbers information
6507  *
6508  * FW maintains a log of all events in a non-volatile area. Upper layers would
6509  * usually find out the latest sequence number of the events, the seq number at
6510  * the boot etc. They would "read" all the events below the latest seq number
6511  * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
6512  * number), they would subscribe to AEN (asynchronous event notification) and
6513  * wait for the events to happen.
6514  */
6515 static int
6516 megasas_get_seq_num(struct megasas_instance *instance,
6517 		    struct megasas_evt_log_info *eli)
6518 {
6519 	struct megasas_cmd *cmd;
6520 	struct megasas_dcmd_frame *dcmd;
6521 	struct megasas_evt_log_info *el_info;
6522 	dma_addr_t el_info_h = 0;
6523 	int ret;
6524 
6525 	cmd = megasas_get_cmd(instance);
6526 
6527 	if (!cmd) {
6528 		return -ENOMEM;
6529 	}
6530 
6531 	dcmd = &cmd->frame->dcmd;
6532 	el_info = dma_alloc_coherent(&instance->pdev->dev,
6533 				     sizeof(struct megasas_evt_log_info),
6534 				     &el_info_h, GFP_KERNEL);
6535 	if (!el_info) {
6536 		megasas_return_cmd(instance, cmd);
6537 		return -ENOMEM;
6538 	}
6539 
6540 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6541 
6542 	dcmd->cmd = MFI_CMD_DCMD;
6543 	dcmd->cmd_status = 0x0;
6544 	dcmd->sge_count = 1;
6545 	dcmd->flags = MFI_FRAME_DIR_READ;
6546 	dcmd->timeout = 0;
6547 	dcmd->pad_0 = 0;
6548 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6549 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6550 
6551 	megasas_set_dma_settings(instance, dcmd, el_info_h,
6552 				 sizeof(struct megasas_evt_log_info));
6553 
6554 	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6555 	if (ret != DCMD_SUCCESS) {
6556 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6557 			__func__, __LINE__);
6558 		goto dcmd_failed;
6559 	}
6560 
6561 	/*
6562 	 * Copy the data back into callers buffer
6563 	 */
6564 	eli->newest_seq_num = el_info->newest_seq_num;
6565 	eli->oldest_seq_num = el_info->oldest_seq_num;
6566 	eli->clear_seq_num = el_info->clear_seq_num;
6567 	eli->shutdown_seq_num = el_info->shutdown_seq_num;
6568 	eli->boot_seq_num = el_info->boot_seq_num;
6569 
6570 dcmd_failed:
6571 	dma_free_coherent(&instance->pdev->dev,
6572 			sizeof(struct megasas_evt_log_info),
6573 			el_info, el_info_h);
6574 
6575 	megasas_return_cmd(instance, cmd);
6576 
6577 	return ret;
6578 }
6579 
6580 /**
6581  * megasas_register_aen -	Registers for asynchronous event notification
6582  * @instance:			Adapter soft state
6583  * @seq_num:			The starting sequence number
6584  * @class_locale_word:		Class of the event
6585  *
6586  * This function subscribes for AEN for events beyond the @seq_num. It requests
6587  * to be notified if and only if the event is of type @class_locale
6588  */
6589 static int
6590 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6591 		     u32 class_locale_word)
6592 {
6593 	int ret_val;
6594 	struct megasas_cmd *cmd;
6595 	struct megasas_dcmd_frame *dcmd;
6596 	union megasas_evt_class_locale curr_aen;
6597 	union megasas_evt_class_locale prev_aen;
6598 
6599 	/*
6600 	 * If there is an AEN pending already (aen_cmd), check if the
6601 	 * class_locale of that pending AEN is inclusive of the new
6602 	 * AEN request we currently have. If it is, then we don't have
6603 	 * to do anything. In other words, whichever events the current
6604 	 * AEN request is subscribing to, have already been subscribed
6605 	 * to.
6606 	 *
6607 	 * If the old_cmd is _not_ inclusive, then we have to abort
6608 	 * that command, form a class_locale that is superset of both
6609 	 * old and current and re-issue to the FW
6610 	 */
6611 
6612 	curr_aen.word = class_locale_word;
6613 
6614 	if (instance->aen_cmd) {
6615 
6616 		prev_aen.word =
6617 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6618 
6619 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6620 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6621 			dev_info(&instance->pdev->dev,
6622 				 "%s %d out of range class %d sent by application\n",
6623 				 __func__, __LINE__, curr_aen.members.class);
6624 			return 0;
6625 		}
6626 
6627 		/*
6628 		 * A class whose enum value is smaller is inclusive of all
6629 		 * higher values. If a PROGRESS (= -1) was previously
6630 		 * registered, then a new registration requests for higher
6631 		 * classes need not be sent to FW. They are automatically
6632 		 * included.
6633 		 *
6634 		 * Locale numbers don't have such hierarchy. They are bitmap
6635 		 * values
6636 		 */
6637 		if ((prev_aen.members.class <= curr_aen.members.class) &&
6638 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
6639 		      curr_aen.members.locale)) {
6640 			/*
6641 			 * Previously issued event registration includes
6642 			 * current request. Nothing to do.
6643 			 */
6644 			return 0;
6645 		} else {
6646 			curr_aen.members.locale |= prev_aen.members.locale;
6647 
6648 			if (prev_aen.members.class < curr_aen.members.class)
6649 				curr_aen.members.class = prev_aen.members.class;
6650 
6651 			instance->aen_cmd->abort_aen = 1;
6652 			ret_val = megasas_issue_blocked_abort_cmd(instance,
6653 								  instance->
6654 								  aen_cmd, 30);
6655 
6656 			if (ret_val) {
6657 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6658 				       "previous AEN command\n");
6659 				return ret_val;
6660 			}
6661 		}
6662 	}
6663 
6664 	cmd = megasas_get_cmd(instance);
6665 
6666 	if (!cmd)
6667 		return -ENOMEM;
6668 
6669 	dcmd = &cmd->frame->dcmd;
6670 
6671 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6672 
6673 	/*
6674 	 * Prepare DCMD for aen registration
6675 	 */
6676 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6677 
6678 	dcmd->cmd = MFI_CMD_DCMD;
6679 	dcmd->cmd_status = 0x0;
6680 	dcmd->sge_count = 1;
6681 	dcmd->flags = MFI_FRAME_DIR_READ;
6682 	dcmd->timeout = 0;
6683 	dcmd->pad_0 = 0;
6684 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6685 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6686 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6687 	instance->last_seq_num = seq_num;
6688 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6689 
6690 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6691 				 sizeof(struct megasas_evt_detail));
6692 
6693 	if (instance->aen_cmd != NULL) {
6694 		megasas_return_cmd(instance, cmd);
6695 		return 0;
6696 	}
6697 
6698 	/*
6699 	 * Store reference to the cmd used to register for AEN. When an
6700 	 * application wants us to register for AEN, we have to abort this
6701 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
6702 	 */
6703 	instance->aen_cmd = cmd;
6704 
6705 	/*
6706 	 * Issue the aen registration frame
6707 	 */
6708 	instance->instancet->issue_dcmd(instance, cmd);
6709 
6710 	return 0;
6711 }
6712 
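/*
 * Editor's note: a minimal sketch (not part of the driver) of the merge rule
 * described in megasas_register_aen() above: locales are a bitmap and are
 * OR'ed together, while the class keeps the smaller (more inclusive) value.
 * The parameters are plain integers here; the driver packs them into
 * union megasas_evt_class_locale.
 *
 * Example: prev locale=0x0001 class=-1 (PROGRESS), curr locale=0x0010 class=2
 * merges to locale=0x0011 class=-1, so the single re-issued registration
 * still covers everything the old one did.
 */
static void example_merge_aen(u32 prev_locale, s8 prev_class,
			      u32 *curr_locale, s8 *curr_class)
{
	*curr_locale |= prev_locale;		/* union of locale bitmaps */
	if (prev_class < *curr_class)
		*curr_class = prev_class;	/* keep the more inclusive class */
}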
6713 /* megasas_get_target_prop - Send DCMD with below details to firmware.
6714  *
6715  * This DCMD fetches a few properties of an LD/system PD defined
6716  * in MR_TARGET_DEV_PROPERTIES, e.g. Queue Depth, MDTS value.
6717  *
6718  * The DCMD is sent by the driver whenever a new target is added to the OS.
6719  *
6720  * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
6721  * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
6722  *                       0 = system PD, 1 = LD.
6723  * dcmd.mbox.s[1]      - TargetID for LD/system PD.
6724  * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
6725  *
6726  * @instance:		Adapter soft state
6727  * @sdev:		OS provided scsi device
6728  *
6729  * Returns 0 on success, non-zero on failure.
6730  */
6731 int
6732 megasas_get_target_prop(struct megasas_instance *instance,
6733 			struct scsi_device *sdev)
6734 {
6735 	int ret;
6736 	struct megasas_cmd *cmd;
6737 	struct megasas_dcmd_frame *dcmd;
6738 	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6739 			sdev->id;
6740 
6741 	cmd = megasas_get_cmd(instance);
6742 
6743 	if (!cmd) {
6744 		dev_err(&instance->pdev->dev,
6745 			"Failed to get cmd %s\n", __func__);
6746 		return -ENOMEM;
6747 	}
6748 
6749 	dcmd = &cmd->frame->dcmd;
6750 
6751 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6752 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6753 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6754 
6755 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
6756 	dcmd->cmd = MFI_CMD_DCMD;
6757 	dcmd->cmd_status = 0xFF;
6758 	dcmd->sge_count = 1;
6759 	dcmd->flags = MFI_FRAME_DIR_READ;
6760 	dcmd->timeout = 0;
6761 	dcmd->pad_0 = 0;
6762 	dcmd->data_xfer_len =
6763 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6764 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6765 
6766 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6767 				 sizeof(struct MR_TARGET_PROPERTIES));
6768 
6769 	if ((instance->adapter_type != MFI_SERIES) &&
6770 	    !instance->mask_interrupts)
6771 		ret = megasas_issue_blocked_cmd(instance,
6772 						cmd, MFI_IO_TIMEOUT_SECS);
6773 	else
6774 		ret = megasas_issue_polled(instance, cmd);
6775 
6776 	switch (ret) {
6777 	case DCMD_TIMEOUT:
6778 		switch (dcmd_timeout_ocr_possible(instance)) {
6779 		case INITIATE_OCR:
6780 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6781 			mutex_unlock(&instance->reset_mutex);
6782 			megasas_reset_fusion(instance->host,
6783 					     MFI_IO_TIMEOUT_OCR);
6784 			mutex_lock(&instance->reset_mutex);
6785 			break;
6786 		case KILL_ADAPTER:
6787 			megaraid_sas_kill_hba(instance);
6788 			break;
6789 		case IGNORE_TIMEOUT:
6790 			dev_info(&instance->pdev->dev,
6791 				 "Ignore DCMD timeout: %s %d\n",
6792 				 __func__, __LINE__);
6793 			break;
6794 		}
6795 		break;
6796 
6797 	default:
6798 		megasas_return_cmd(instance, cmd);
6799 	}
6800 	if (ret != DCMD_SUCCESS)
6801 		dev_err(&instance->pdev->dev,
6802 			"return from %s %d return value %d\n",
6803 			__func__, __LINE__, ret);
6804 
6805 	return ret;
6806 }
6807 
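/*
 * Editor's note: a small sketch (not part of the driver) of the target ID
 * computation used by megasas_get_target_prop() above.  The constant 128
 * mirrors MEGASAS_MAX_DEV_PER_CHANNEL from megaraid_sas.h; treat the exact
 * value as an assumption of this example.
 *
 * Example: channel 0, id 5 -> targetId 5; channel 3, id 5 -> targetId 133.
 */
static u16 example_target_id(u32 channel, u32 id)
{
	return (u16)((channel % 2) * 128 + id);
}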
6808 /**
6809  * megasas_start_aen -	Subscribes to AEN during driver load time
6810  * @instance:		Adapter soft state
6811  */
6812 static int megasas_start_aen(struct megasas_instance *instance)
6813 {
6814 	struct megasas_evt_log_info eli;
6815 	union megasas_evt_class_locale class_locale;
6816 
6817 	/*
6818 	 * Get the latest sequence number from FW
6819 	 */
6820 	memset(&eli, 0, sizeof(eli));
6821 
6822 	if (megasas_get_seq_num(instance, &eli))
6823 		return -1;
6824 
6825 	/*
6826 	 * Register AEN with FW for latest sequence number plus 1
6827 	 */
6828 	class_locale.members.reserved = 0;
6829 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
6830 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
6831 
6832 	return megasas_register_aen(instance,
6833 			le32_to_cpu(eli.newest_seq_num) + 1,
6834 			class_locale.word);
6835 }
6836 
6837 /**
6838  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
6839  * @instance:		Adapter soft state
6840  */
6841 static int megasas_io_attach(struct megasas_instance *instance)
6842 {
6843 	struct Scsi_Host *host = instance->host;
6844 
6845 	/*
6846 	 * Export parameters required by SCSI mid-layer
6847 	 */
6848 	host->unique_id = instance->unique_id;
6849 	host->can_queue = instance->max_scsi_cmds;
6850 	host->this_id = instance->init_id;
6851 	host->sg_tablesize = instance->max_num_sge;
6852 
6853 	if (instance->fw_support_ieee)
6854 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6855 
6856 	/*
6857 	 * Check if the module parameter value for max_sectors can be used
6858 	 */
6859 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
6860 		instance->max_sectors_per_req = max_sectors;
6861 	else {
6862 		if (max_sectors) {
6863 			if (((instance->pdev->device ==
6864 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6865 				(instance->pdev->device ==
6866 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6867 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
6868 				instance->max_sectors_per_req = max_sectors;
6869 			} else {
6870 			dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
6871 				"and <= %d (or < 1MB for GEN2 controller)\n",
6872 				instance->max_sectors_per_req);
6873 			}
6874 		}
6875 	}
6876 
6877 	host->max_sectors = instance->max_sectors_per_req;
6878 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6879 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6880 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6881 	host->max_lun = MEGASAS_MAX_LUN;
6882 	host->max_cmd_len = 16;
6883 
6884 	/* Use a shared host tagset only for fusion adapters when managed
6885 	 * interrupts are in use (smp affinity enabled case). A single MSI-X
6886 	 * vector is used in kdump, so shared host tagging is disabled there too.
6887 	 */
6888 
6889 	host->host_tagset = 0;
6890 	host->nr_hw_queues = 1;
6891 
6892 	if ((instance->adapter_type != MFI_SERIES) &&
6893 		(instance->msix_vectors > instance->low_latency_index_start) &&
6894 		host_tagset_enable &&
6895 		instance->smp_affinity_enable) {
6896 		host->host_tagset = 1;
6897 		host->nr_hw_queues = instance->msix_vectors -
6898 			instance->low_latency_index_start;
6899 	}
6900 
6901 	dev_info(&instance->pdev->dev,
6902 		"Max firmware commands: %d shared with nr_hw_queues = %d\n",
6903 		instance->max_fw_cmds, host->nr_hw_queues);
6904 	/*
6905 	 * Notify the mid-layer about the new controller
6906 	 */
6907 	if (scsi_add_host(host, &instance->pdev->dev)) {
6908 		dev_err(&instance->pdev->dev,
6909 			"Failed to add host from %s %d\n",
6910 			__func__, __LINE__);
6911 		return -ENODEV;
6912 	}
6913 
6914 	return 0;
6915 }
6916 
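/*
 * Editor's note: a compact restatement (not driver code) of the max_sectors
 * module-parameter check performed in megasas_io_attach() above: a
 * user-supplied value is accepted only if it is positive and does not raise
 * the current limit, except on the two GEN2 controllers, where anything up
 * to MEGASAS_MAX_SECTORS is allowed.  Parameter names are illustrative.
 */
static unsigned int example_pick_max_sectors(unsigned int module_param,
					     unsigned int fw_default,
					     bool is_gen2_controller,
					     unsigned int gen2_limit)
{
	if (!module_param)
		return fw_default;		/* parameter not set */

	if (module_param < fw_default)
		return module_param;		/* lowering is always allowed */

	if (is_gen2_controller && module_param <= gen2_limit)
		return module_param;		/* GEN2 may raise the limit */

	return fw_default;			/* out of range: keep default */
}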
6917 /**
6918  * megasas_set_dma_mask -	Set DMA mask for supported controllers
6919  *
6920  * @instance:		Adapter soft state
6921  * Description:
6922  *
6923  * For Ventura, the driver/FW operate with 63-bit DMA addresses.
6924  *
6925  * For Invader:
6926  *	By default, the driver/FW operate with 32-bit addresses for
6927  *	consistent DMA mapping, but if setting the 32-bit consistent
6928  *	DMA mask fails, the driver retries with a 63-bit consistent
6929  *	mask, provided the FW is truly 63-bit DMA capable.
6930  *
6931  * For older controllers (Thunderbolt and MFI based adapters):
6932  *	the driver/FW operate with 32-bit consistent DMA addresses.
6933  */
6934 static int
6935 megasas_set_dma_mask(struct megasas_instance *instance)
6936 {
6937 	u64 consistent_mask;
6938 	struct pci_dev *pdev;
6939 	u32 scratch_pad_1;
6940 
6941 	pdev = instance->pdev;
6942 	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6943 				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6944 
6945 	if (IS_DMA64) {
6946 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6947 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6948 			goto fail_set_dma_mask;
6949 
6950 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6951 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6952 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6953 			/*
6954 			 * If 32 bit DMA mask fails, then try for 64 bit mask
6955 			 * for FW capable of handling 64 bit DMA.
6956 			 */
6957 			scratch_pad_1 = megasas_readl
6958 				(instance, &instance->reg_set->outbound_scratch_pad_1);
6959 
6960 			if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6961 				goto fail_set_dma_mask;
6962 			else if (dma_set_mask_and_coherent(&pdev->dev,
6963 							   DMA_BIT_MASK(63)))
6964 				goto fail_set_dma_mask;
6965 		}
6966 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6967 		goto fail_set_dma_mask;
6968 
6969 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6970 		instance->consistent_mask_64bit = false;
6971 	else
6972 		instance->consistent_mask_64bit = true;
6973 
6974 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6975 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6976 		 (instance->consistent_mask_64bit ? "63" : "32"));
6977 
6978 	return 0;
6979 
6980 fail_set_dma_mask:
6981 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6982 	return -1;
6983 
6984 }
6985 
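/*
 * Editor's note: a simplified, hedged sketch (not the driver's exact logic)
 * of the fallback pattern described above: prefer a 63-bit streaming mask,
 * fall back to 32-bit for both streaming and coherent masks if that fails,
 * and choose the coherent mask separately.  The FW capability check and
 * error reporting are omitted for brevity.
 */
static int example_set_dma_mask(struct pci_dev *pdev, bool want_63bit_coherent)
{
	/* Try the wide streaming mask first. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63))) {
		/* Wide mask rejected: fall back to 32-bit for everything. */
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}

	/* Streaming mask is 63-bit; pick the coherent mask separately. */
	if (want_63bit_coherent)
		return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(63));

	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}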
6986 /*
6987  * megasas_set_adapter_type -	Set adapter type.
6988  *				Supported controllers can be divided in
6989  *				different categories-
6990  *					enum MR_ADAPTER_TYPE {
6991  *						MFI_SERIES = 1,
6992  *						THUNDERBOLT_SERIES = 2,
6993  *						INVADER_SERIES = 3,
6994  *						VENTURA_SERIES = 4,
6995  *						AERO_SERIES = 5,
6996  *					};
6997  * @instance:			Adapter soft state
6998  * return:			void
6999  */
7000 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
7001 {
7002 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
7003 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
7004 		instance->adapter_type = MFI_SERIES;
7005 	} else {
7006 		switch (instance->pdev->device) {
7007 		case PCI_DEVICE_ID_LSI_AERO_10E1:
7008 		case PCI_DEVICE_ID_LSI_AERO_10E2:
7009 		case PCI_DEVICE_ID_LSI_AERO_10E5:
7010 		case PCI_DEVICE_ID_LSI_AERO_10E6:
7011 			instance->adapter_type = AERO_SERIES;
7012 			break;
7013 		case PCI_DEVICE_ID_LSI_VENTURA:
7014 		case PCI_DEVICE_ID_LSI_CRUSADER:
7015 		case PCI_DEVICE_ID_LSI_HARPOON:
7016 		case PCI_DEVICE_ID_LSI_TOMCAT:
7017 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
7018 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
7019 			instance->adapter_type = VENTURA_SERIES;
7020 			break;
7021 		case PCI_DEVICE_ID_LSI_FUSION:
7022 		case PCI_DEVICE_ID_LSI_PLASMA:
7023 			instance->adapter_type = THUNDERBOLT_SERIES;
7024 			break;
7025 		case PCI_DEVICE_ID_LSI_INVADER:
7026 		case PCI_DEVICE_ID_LSI_INTRUDER:
7027 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
7028 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
7029 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
7030 		case PCI_DEVICE_ID_LSI_FURY:
7031 			instance->adapter_type = INVADER_SERIES;
7032 			break;
7033 		default: /* For all other supported controllers */
7034 			instance->adapter_type = MFI_SERIES;
7035 			break;
7036 		}
7037 	}
7038 }
7039 
7040 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
7041 {
7042 	instance->producer = dma_alloc_coherent(&instance->pdev->dev,
7043 			sizeof(u32), &instance->producer_h, GFP_KERNEL);
7044 	instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
7045 			sizeof(u32), &instance->consumer_h, GFP_KERNEL);
7046 
7047 	if (!instance->producer || !instance->consumer) {
7048 		dev_err(&instance->pdev->dev,
7049 			"Failed to allocate memory for producer, consumer\n");
7050 		return -1;
7051 	}
7052 
7053 	*instance->producer = 0;
7054 	*instance->consumer = 0;
7055 	return 0;
7056 }
7057 
7058 /**
7059  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
7060  *				structures which are not common across MFI
7061  *				adapters and fusion adapters.
7062  *				For MFI based adapters, allocate producer and
7063  *				consumer buffers. For fusion adapters, allocate
7064  *				memory for fusion context.
7065  * @instance:			Adapter soft state
7066  * return:			0 for SUCCESS
7067  */
7068 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
7069 {
7070 	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
7071 				      GFP_KERNEL);
7072 	if (!instance->reply_map)
7073 		return -ENOMEM;
7074 
7075 	switch (instance->adapter_type) {
7076 	case MFI_SERIES:
7077 		if (megasas_alloc_mfi_ctrl_mem(instance))
7078 			goto fail;
7079 		break;
7080 	case AERO_SERIES:
7081 	case VENTURA_SERIES:
7082 	case THUNDERBOLT_SERIES:
7083 	case INVADER_SERIES:
7084 		if (megasas_alloc_fusion_context(instance))
7085 			goto fail;
7086 		break;
7087 	}
7088 
7089 	return 0;
7090  fail:
7091 	kfree(instance->reply_map);
7092 	instance->reply_map = NULL;
7093 	return -ENOMEM;
7094 }
7095 
7096 /*
7097  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
7098  *				producer, consumer buffers for MFI adapters
7099  *
7100  * @instance -			Adapter soft instance
7101  *
7102  */
7103 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
7104 {
7105 	kfree(instance->reply_map);
7106 	if (instance->adapter_type == MFI_SERIES) {
7107 		if (instance->producer)
7108 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7109 					    instance->producer,
7110 					    instance->producer_h);
7111 		if (instance->consumer)
7112 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7113 					    instance->consumer,
7114 					    instance->consumer_h);
7115 	} else {
7116 		megasas_free_fusion_context(instance);
7117 	}
7118 }
7119 
7120 /**
7121  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
7122  *					driver load time
7123  *
7124  * @instance:				Adapter soft instance
7125  *
7126  * @return:				0 for SUCCESS
7127  */
7128 static inline
7129 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
7130 {
7131 	struct pci_dev *pdev = instance->pdev;
7132 	struct fusion_context *fusion = instance->ctrl_context;
7133 
7134 	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
7135 			sizeof(struct megasas_evt_detail),
7136 			&instance->evt_detail_h, GFP_KERNEL);
7137 
7138 	if (!instance->evt_detail) {
7139 		dev_err(&instance->pdev->dev,
7140 			"Failed to allocate event detail buffer\n");
7141 		return -ENOMEM;
7142 	}
7143 
7144 	if (fusion) {
7145 		fusion->ioc_init_request =
7146 			dma_alloc_coherent(&pdev->dev,
7147 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
7148 					   &fusion->ioc_init_request_phys,
7149 					   GFP_KERNEL);
7150 
7151 		if (!fusion->ioc_init_request) {
7152 			dev_err(&pdev->dev,
7153 				"Failed to allocate ioc init request buffer\n");
7154 			return -ENOMEM;
7155 		}
7156 
7157 		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
7158 				sizeof(struct MR_SNAPDUMP_PROPERTIES),
7159 				&instance->snapdump_prop_h, GFP_KERNEL);
7160 
7161 		if (!instance->snapdump_prop)
7162 			dev_err(&pdev->dev,
7163 				"Failed to allocate snapdump properties buffer\n");
7164 
7165 		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
7166 							HOST_DEVICE_LIST_SZ,
7167 							&instance->host_device_list_buf_h,
7168 							GFP_KERNEL);
7169 
7170 		if (!instance->host_device_list_buf) {
7171 			dev_err(&pdev->dev,
7172 				"Failed to allocate targetid list buffer\n");
7173 			return -ENOMEM;
7174 		}
7175 
7176 	}
7177 
7178 	instance->pd_list_buf =
7179 		dma_alloc_coherent(&pdev->dev,
7180 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7181 				     &instance->pd_list_buf_h, GFP_KERNEL);
7182 
7183 	if (!instance->pd_list_buf) {
7184 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
7185 		return -ENOMEM;
7186 	}
7187 
7188 	instance->ctrl_info_buf =
7189 		dma_alloc_coherent(&pdev->dev,
7190 				     sizeof(struct megasas_ctrl_info),
7191 				     &instance->ctrl_info_buf_h, GFP_KERNEL);
7192 
7193 	if (!instance->ctrl_info_buf) {
7194 		dev_err(&pdev->dev,
7195 			"Failed to allocate controller info buffer\n");
7196 		return -ENOMEM;
7197 	}
7198 
7199 	instance->ld_list_buf =
7200 		dma_alloc_coherent(&pdev->dev,
7201 				     sizeof(struct MR_LD_LIST),
7202 				     &instance->ld_list_buf_h, GFP_KERNEL);
7203 
7204 	if (!instance->ld_list_buf) {
7205 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7206 		return -ENOMEM;
7207 	}
7208 
7209 	instance->ld_targetid_list_buf =
7210 		dma_alloc_coherent(&pdev->dev,
7211 				sizeof(struct MR_LD_TARGETID_LIST),
7212 				&instance->ld_targetid_list_buf_h, GFP_KERNEL);
7213 
7214 	if (!instance->ld_targetid_list_buf) {
7215 		dev_err(&pdev->dev,
7216 			"Failed to allocate LD targetid list buffer\n");
7217 		return -ENOMEM;
7218 	}
7219 
7220 	if (!reset_devices) {
7221 		instance->system_info_buf =
7222 			dma_alloc_coherent(&pdev->dev,
7223 					sizeof(struct MR_DRV_SYSTEM_INFO),
7224 					&instance->system_info_h, GFP_KERNEL);
7225 		instance->pd_info =
7226 			dma_alloc_coherent(&pdev->dev,
7227 					sizeof(struct MR_PD_INFO),
7228 					&instance->pd_info_h, GFP_KERNEL);
7229 		instance->tgt_prop =
7230 			dma_alloc_coherent(&pdev->dev,
7231 					sizeof(struct MR_TARGET_PROPERTIES),
7232 					&instance->tgt_prop_h, GFP_KERNEL);
7233 		instance->crash_dump_buf =
7234 			dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7235 					&instance->crash_dump_h, GFP_KERNEL);
7236 
7237 		if (!instance->system_info_buf)
7238 			dev_err(&instance->pdev->dev,
7239 				"Failed to allocate system info buffer\n");
7240 
7241 		if (!instance->pd_info)
7242 			dev_err(&instance->pdev->dev,
7243 				"Failed to allocate pd_info buffer\n");
7244 
7245 		if (!instance->tgt_prop)
7246 			dev_err(&instance->pdev->dev,
7247 				"Failed to allocate tgt_prop buffer\n");
7248 
7249 		if (!instance->crash_dump_buf)
7250 			dev_err(&instance->pdev->dev,
7251 				"Failed to allocate crash dump buffer\n");
7252 	}
7253 
7254 	return 0;
7255 }
7256 
7257 /*
7258  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
7259  *					during driver load time
7260  *
7261  * @instance-				Adapter soft instance
7262  *
7263  */
7264 static inline
7265 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7266 {
7267 	struct pci_dev *pdev = instance->pdev;
7268 	struct fusion_context *fusion = instance->ctrl_context;
7269 
7270 	if (instance->evt_detail)
7271 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7272 				    instance->evt_detail,
7273 				    instance->evt_detail_h);
7274 
7275 	if (fusion && fusion->ioc_init_request)
7276 		dma_free_coherent(&pdev->dev,
7277 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
7278 				  fusion->ioc_init_request,
7279 				  fusion->ioc_init_request_phys);
7280 
7281 	if (instance->pd_list_buf)
7282 		dma_free_coherent(&pdev->dev,
7283 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7284 				    instance->pd_list_buf,
7285 				    instance->pd_list_buf_h);
7286 
7287 	if (instance->ld_list_buf)
7288 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7289 				    instance->ld_list_buf,
7290 				    instance->ld_list_buf_h);
7291 
7292 	if (instance->ld_targetid_list_buf)
7293 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7294 				    instance->ld_targetid_list_buf,
7295 				    instance->ld_targetid_list_buf_h);
7296 
7297 	if (instance->ctrl_info_buf)
7298 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7299 				    instance->ctrl_info_buf,
7300 				    instance->ctrl_info_buf_h);
7301 
7302 	if (instance->system_info_buf)
7303 		dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7304 				    instance->system_info_buf,
7305 				    instance->system_info_h);
7306 
7307 	if (instance->pd_info)
7308 		dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7309 				    instance->pd_info, instance->pd_info_h);
7310 
7311 	if (instance->tgt_prop)
7312 		dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7313 				    instance->tgt_prop, instance->tgt_prop_h);
7314 
7315 	if (instance->crash_dump_buf)
7316 		dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7317 				    instance->crash_dump_buf,
7318 				    instance->crash_dump_h);
7319 
7320 	if (instance->snapdump_prop)
7321 		dma_free_coherent(&pdev->dev,
7322 				  sizeof(struct MR_SNAPDUMP_PROPERTIES),
7323 				  instance->snapdump_prop,
7324 				  instance->snapdump_prop_h);
7325 
7326 	if (instance->host_device_list_buf)
7327 		dma_free_coherent(&pdev->dev,
7328 				  HOST_DEVICE_LIST_SZ,
7329 				  instance->host_device_list_buf,
7330 				  instance->host_device_list_buf_h);
7331 
7332 }
7333 
7334 /*
7335  * megasas_init_ctrl_params -		Initialize controller's instance
7336  *					parameters before FW init
7337  * @instance -				Adapter soft instance
7338  * @return -				void
7339  */
7340 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7341 {
7342 	instance->fw_crash_state = UNAVAILABLE;
7343 
7344 	megasas_poll_wait_aen = 0;
7345 	instance->issuepend_done = 1;
7346 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7347 
7348 	/*
7349 	 * Initialize locks and queues
7350 	 */
7351 	INIT_LIST_HEAD(&instance->cmd_pool);
7352 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7353 
7354 	atomic_set(&instance->fw_outstanding, 0);
7355 	atomic64_set(&instance->total_io_count, 0);
7356 
7357 	init_waitqueue_head(&instance->int_cmd_wait_q);
7358 	init_waitqueue_head(&instance->abort_cmd_wait_q);
7359 
7360 	spin_lock_init(&instance->crashdump_lock);
7361 	spin_lock_init(&instance->mfi_pool_lock);
7362 	spin_lock_init(&instance->hba_lock);
7363 	spin_lock_init(&instance->stream_lock);
7364 	spin_lock_init(&instance->completion_lock);
7365 
7366 	mutex_init(&instance->reset_mutex);
7367 
7368 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7369 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7370 		instance->flag_ieee = 1;
7371 
7372 	megasas_dbg_lvl = 0;
7373 	instance->flag = 0;
7374 	instance->unload = 1;
7375 	instance->last_time = 0;
7376 	instance->disableOnlineCtrlReset = 1;
7377 	instance->UnevenSpanSupport = 0;
7378 	instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7379 	instance->msix_load_balance = false;
7380 
7381 	if (instance->adapter_type != MFI_SERIES)
7382 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7383 	else
7384 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7385 }
7386 
7387 /**
7388  * megasas_probe_one -	PCI hotplug entry point
7389  * @pdev:		PCI device structure
7390  * @id:			PCI ids of supported hotplugged adapter
7391  */
7392 static int megasas_probe_one(struct pci_dev *pdev,
7393 			     const struct pci_device_id *id)
7394 {
7395 	int rval, pos;
7396 	struct Scsi_Host *host;
7397 	struct megasas_instance *instance;
7398 	u16 control = 0;
7399 
7400 	switch (pdev->device) {
7401 	case PCI_DEVICE_ID_LSI_AERO_10E0:
7402 	case PCI_DEVICE_ID_LSI_AERO_10E3:
7403 	case PCI_DEVICE_ID_LSI_AERO_10E4:
7404 	case PCI_DEVICE_ID_LSI_AERO_10E7:
7405 		dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7406 		return 1;
7407 	case PCI_DEVICE_ID_LSI_AERO_10E1:
7408 	case PCI_DEVICE_ID_LSI_AERO_10E5:
7409 		dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7410 		break;
7411 	}
7412 
7413 	/* Reset MSI-X in the kdump kernel */
7414 	if (reset_devices) {
7415 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7416 		if (pos) {
7417 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7418 					     &control);
7419 			if (control & PCI_MSIX_FLAGS_ENABLE) {
7420 				dev_info(&pdev->dev, "resetting MSI-X\n");
7421 				pci_write_config_word(pdev,
7422 						      pos + PCI_MSIX_FLAGS,
7423 						      control &
7424 						      ~PCI_MSIX_FLAGS_ENABLE);
7425 			}
7426 		}
7427 	}
7428 
7429 	/*
7430 	 * PCI prepping: enable device, set bus mastering and DMA mask
7431 	 */
7432 	rval = pci_enable_device_mem(pdev);
7433 
7434 	if (rval) {
7435 		return rval;
7436 	}
7437 
7438 	pci_set_master(pdev);
7439 
7440 	host = scsi_host_alloc(&megasas_template,
7441 			       sizeof(struct megasas_instance));
7442 
7443 	if (!host) {
7444 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7445 		goto fail_alloc_instance;
7446 	}
7447 
7448 	instance = (struct megasas_instance *)host->hostdata;
7449 	memset(instance, 0, sizeof(*instance));
7450 	atomic_set(&instance->fw_reset_no_pci_access, 0);
7451 
7452 	/*
7453 	 * Initialize PCI related and misc parameters
7454 	 */
7455 	instance->pdev = pdev;
7456 	instance->host = host;
7457 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
7458 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7459 
7460 	megasas_set_adapter_type(instance);
7461 
7462 	/*
7463 	 * Initialize MFI Firmware
7464 	 */
7465 	if (megasas_init_fw(instance))
7466 		goto fail_init_mfi;
7467 
7468 	if (instance->requestorId) {
7469 		if (instance->PlasmaFW111) {
7470 			instance->vf_affiliation_111 =
7471 				dma_alloc_coherent(&pdev->dev,
7472 					sizeof(struct MR_LD_VF_AFFILIATION_111),
7473 					&instance->vf_affiliation_111_h,
7474 					GFP_KERNEL);
7475 			if (!instance->vf_affiliation_111)
7476 				dev_warn(&pdev->dev, "Can't allocate "
7477 				       "memory for VF affiliation buffer\n");
7478 		} else {
7479 			instance->vf_affiliation =
7480 				dma_alloc_coherent(&pdev->dev,
7481 					(MAX_LOGICAL_DRIVES + 1) *
7482 					sizeof(struct MR_LD_VF_AFFILIATION),
7483 					&instance->vf_affiliation_h,
7484 					GFP_KERNEL);
7485 			if (!instance->vf_affiliation)
7486 				dev_warn(&pdev->dev, "Can't allocate "
7487 				       "memory for VF affiliation buffer\n");
7488 		}
7489 	}
7490 
7491 	/*
7492 	 * Store instance in PCI softstate
7493 	 */
7494 	pci_set_drvdata(pdev, instance);
7495 
7496 	/*
7497 	 * Add this controller to megasas_mgmt_info structure so that it
7498 	 * can be exported to management applications
7499 	 */
7500 	megasas_mgmt_info.count++;
7501 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7502 	megasas_mgmt_info.max_index++;
7503 
7504 	/*
7505 	 * Register with SCSI mid-layer
7506 	 */
7507 	if (megasas_io_attach(instance))
7508 		goto fail_io_attach;
7509 
7510 	instance->unload = 0;
7511 	/*
7512 	 * Trigger SCSI to scan our drives
7513 	 */
7514 	if (!instance->enable_fw_dev_list ||
7515 	    (instance->host_device_list_buf->count > 0))
7516 		scsi_scan_host(host);
7517 
7518 	/*
7519 	 * Initiate AEN (Asynchronous Event Notification)
7520 	 */
7521 	if (megasas_start_aen(instance)) {
7522 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7523 		goto fail_start_aen;
7524 	}
7525 
7526 	megasas_setup_debugfs(instance);
7527 
7528 	/* Get current SR-IOV LD/VF affiliation */
7529 	if (instance->requestorId)
7530 		megasas_get_ld_vf_affiliation(instance, 1);
7531 
7532 	return 0;
7533 
7534 fail_start_aen:
7535 	instance->unload = 1;
7536 	scsi_remove_host(instance->host);
7537 fail_io_attach:
7538 	megasas_mgmt_info.count--;
7539 	megasas_mgmt_info.max_index--;
7540 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7541 
7542 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7543 		del_timer_sync(&instance->sriov_heartbeat_timer);
7544 
7545 	instance->instancet->disable_intr(instance);
7546 	megasas_destroy_irqs(instance);
7547 
7548 	if (instance->adapter_type != MFI_SERIES)
7549 		megasas_release_fusion(instance);
7550 	else
7551 		megasas_release_mfi(instance);
7552 
7553 	if (instance->msix_vectors)
7554 		pci_free_irq_vectors(instance->pdev);
7555 	instance->msix_vectors = 0;
7556 
7557 	if (instance->fw_crash_state != UNAVAILABLE)
7558 		megasas_free_host_crash_buffer(instance);
7559 
7560 	if (instance->adapter_type != MFI_SERIES)
7561 		megasas_fusion_stop_watchdog(instance);
7562 fail_init_mfi:
7563 	scsi_host_put(host);
7564 fail_alloc_instance:
7565 	pci_disable_device(pdev);
7566 
7567 	return -ENODEV;
7568 }
7569 
7570 /**
7571  * megasas_flush_cache -	Requests FW to flush all its caches
7572  * @instance:			Adapter soft state
7573  */
7574 static void megasas_flush_cache(struct megasas_instance *instance)
7575 {
7576 	struct megasas_cmd *cmd;
7577 	struct megasas_dcmd_frame *dcmd;
7578 
7579 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7580 		return;
7581 
7582 	cmd = megasas_get_cmd(instance);
7583 
7584 	if (!cmd)
7585 		return;
7586 
7587 	dcmd = &cmd->frame->dcmd;
7588 
7589 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7590 
7591 	dcmd->cmd = MFI_CMD_DCMD;
7592 	dcmd->cmd_status = 0x0;
7593 	dcmd->sge_count = 0;
7594 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7595 	dcmd->timeout = 0;
7596 	dcmd->pad_0 = 0;
7597 	dcmd->data_xfer_len = 0;
7598 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
7599 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7600 
7601 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7602 			!= DCMD_SUCCESS) {
7603 		dev_err(&instance->pdev->dev,
7604 			"return from %s %d\n", __func__, __LINE__);
7605 		return;
7606 	}
7607 
7608 	megasas_return_cmd(instance, cmd);
7609 }
7610 
7611 /**
7612  * megasas_shutdown_controller -	Instructs FW to shutdown the controller
7613  * @instance:				Adapter soft state
7614  * @opcode:				Shutdown/Hibernate
7615  */
7616 static void megasas_shutdown_controller(struct megasas_instance *instance,
7617 					u32 opcode)
7618 {
7619 	struct megasas_cmd *cmd;
7620 	struct megasas_dcmd_frame *dcmd;
7621 
7622 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7623 		return;
7624 
7625 	cmd = megasas_get_cmd(instance);
7626 
7627 	if (!cmd)
7628 		return;
7629 
7630 	if (instance->aen_cmd)
7631 		megasas_issue_blocked_abort_cmd(instance,
7632 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7633 	if (instance->map_update_cmd)
7634 		megasas_issue_blocked_abort_cmd(instance,
7635 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7636 	if (instance->jbod_seq_cmd)
7637 		megasas_issue_blocked_abort_cmd(instance,
7638 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7639 
7640 	dcmd = &cmd->frame->dcmd;
7641 
7642 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7643 
7644 	dcmd->cmd = MFI_CMD_DCMD;
7645 	dcmd->cmd_status = 0x0;
7646 	dcmd->sge_count = 0;
7647 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7648 	dcmd->timeout = 0;
7649 	dcmd->pad_0 = 0;
7650 	dcmd->data_xfer_len = 0;
7651 	dcmd->opcode = cpu_to_le32(opcode);
7652 
7653 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7654 			!= DCMD_SUCCESS) {
7655 		dev_err(&instance->pdev->dev,
7656 			"return from %s %d\n", __func__, __LINE__);
7657 		return;
7658 	}
7659 
7660 	megasas_return_cmd(instance, cmd);
7661 }
7662 
7663 #ifdef CONFIG_PM
7664 /**
7665  * megasas_suspend -	driver suspend entry point
7666  * @pdev:		PCI device structure
7667  * @state:		PCI power state to suspend routine
7668  */
7669 static int
7670 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7671 {
7672 	struct megasas_instance *instance;
7673 
7674 	instance = pci_get_drvdata(pdev);
7675 
7676 	if (!instance)
7677 		return 0;
7678 
7679 	instance->unload = 1;
7680 
7681 	dev_info(&pdev->dev, "%s is called\n", __func__);
7682 
7683 	/* Shutdown SR-IOV heartbeat timer */
7684 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7685 		del_timer_sync(&instance->sriov_heartbeat_timer);
7686 
7687 	/* Stop the FW fault detection watchdog */
7688 	if (instance->adapter_type != MFI_SERIES)
7689 		megasas_fusion_stop_watchdog(instance);
7690 
7691 	megasas_flush_cache(instance);
7692 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7693 
7694 	/* cancel the delayed work if it is still queued */
7695 	if (instance->ev != NULL) {
7696 		struct megasas_aen_event *ev = instance->ev;
7697 		cancel_delayed_work_sync(&ev->hotplug_work);
7698 		instance->ev = NULL;
7699 	}
7700 
7701 	tasklet_kill(&instance->isr_tasklet);
7702 
7703 	pci_set_drvdata(instance->pdev, instance);
7704 	instance->instancet->disable_intr(instance);
7705 
7706 	megasas_destroy_irqs(instance);
7707 
7708 	if (instance->msix_vectors)
7709 		pci_free_irq_vectors(instance->pdev);
7710 
7711 	pci_save_state(pdev);
7712 	pci_disable_device(pdev);
7713 
7714 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
7715 
7716 	return 0;
7717 }
7718 
7719 /**
7720  * megasas_resume-      driver resume entry point
7721  * @pdev:               PCI device structure
7722  */
7723 static int
7724 megasas_resume(struct pci_dev *pdev)
7725 {
7726 	int rval;
7727 	struct Scsi_Host *host;
7728 	struct megasas_instance *instance;
7729 	u32 status_reg;
7730 
7731 	instance = pci_get_drvdata(pdev);
7732 
7733 	if (!instance)
7734 		return 0;
7735 
7736 	host = instance->host;
7737 	pci_set_power_state(pdev, PCI_D0);
7738 	pci_enable_wake(pdev, PCI_D0, 0);
7739 	pci_restore_state(pdev);
7740 
7741 	dev_info(&pdev->dev, "%s is called\n", __func__);
7742 	/*
7743 	 * PCI prepping: enable device, set bus mastering and DMA mask
7744 	 */
7745 	rval = pci_enable_device_mem(pdev);
7746 
7747 	if (rval) {
7748 		dev_err(&pdev->dev, "Enable device failed\n");
7749 		return rval;
7750 	}
7751 
7752 	pci_set_master(pdev);
7753 
7754 	/*
7755 	 * We expect the FW state to be READY
7756 	 */
7757 
7758 	if (megasas_transition_to_ready(instance, 0)) {
7759 		dev_info(&instance->pdev->dev,
7760 			 "Failed to transition controller to ready from %s!\n",
7761 			 __func__);
7762 		if (instance->adapter_type != MFI_SERIES) {
7763 			status_reg =
7764 				instance->instancet->read_fw_status_reg(instance);
7765 			if (!(status_reg & MFI_RESET_ADAPTER) ||
7766 				((megasas_adp_reset_wait_for_ready
7767 				(instance, true, 0)) == FAILED))
7768 				goto fail_ready_state;
7769 		} else {
7770 			atomic_set(&instance->fw_reset_no_pci_access, 1);
7771 			instance->instancet->adp_reset
7772 				(instance, instance->reg_set);
7773 			atomic_set(&instance->fw_reset_no_pci_access, 0);
7774 
7775 			/* waiting for about 30 seconds before retry */
7776 			ssleep(30);
7777 
7778 			if (megasas_transition_to_ready(instance, 0))
7779 				goto fail_ready_state;
7780 		}
7781 
7782 		dev_info(&instance->pdev->dev,
7783 			 "FW restarted successfully from %s!\n",
7784 			 __func__);
7785 	}
7786 	if (megasas_set_dma_mask(instance))
7787 		goto fail_set_dma_mask;
7788 
7789 	/*
7790 	 * Initialize MFI Firmware
7791 	 */
7792 
7793 	atomic_set(&instance->fw_outstanding, 0);
7794 	atomic_set(&instance->ldio_outstanding, 0);
7795 
7796 	/* Now re-enable MSI-X */
7797 	if (instance->msix_vectors)
7798 		megasas_alloc_irq_vectors(instance);
7799 
7800 	if (!instance->msix_vectors) {
7801 		rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
7802 					     PCI_IRQ_LEGACY);
7803 		if (rval < 0)
7804 			goto fail_reenable_msix;
7805 	}
7806 
7807 	megasas_setup_reply_map(instance);
7808 
7809 	if (instance->adapter_type != MFI_SERIES) {
7810 		megasas_reset_reply_desc(instance);
7811 		if (megasas_ioc_init_fusion(instance)) {
7812 			megasas_free_cmds(instance);
7813 			megasas_free_cmds_fusion(instance);
7814 			goto fail_init_mfi;
7815 		}
7816 		if (!megasas_get_map_info(instance))
7817 			megasas_sync_map_info(instance);
7818 	} else {
7819 		*instance->producer = 0;
7820 		*instance->consumer = 0;
7821 		if (megasas_issue_init_mfi(instance))
7822 			goto fail_init_mfi;
7823 	}
7824 
7825 	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7826 		goto fail_init_mfi;
7827 
7828 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7829 		     (unsigned long)instance);
7830 
7831 	if (instance->msix_vectors ?
7832 			megasas_setup_irqs_msix(instance, 0) :
7833 			megasas_setup_irqs_ioapic(instance))
7834 		goto fail_init_mfi;
7835 
7836 	if (instance->adapter_type != MFI_SERIES)
7837 		megasas_setup_irq_poll(instance);
7838 
7839 	/* Re-launch SR-IOV heartbeat timer */
7840 	if (instance->requestorId) {
7841 		if (!megasas_sriov_start_heartbeat(instance, 0))
7842 			megasas_start_timer(instance);
7843 		else {
7844 			instance->skip_heartbeat_timer_del = 1;
7845 			goto fail_init_mfi;
7846 		}
7847 	}
7848 
7849 	instance->instancet->enable_intr(instance);
7850 	megasas_setup_jbod_map(instance);
7851 	instance->unload = 0;
7852 
7853 	/*
7854 	 * Initiate AEN (Asynchronous Event Notification)
7855 	 */
7856 	if (megasas_start_aen(instance))
7857 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
7858 
7859 	/* Re-launch FW fault watchdog */
7860 	if (instance->adapter_type != MFI_SERIES)
7861 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7862 			goto fail_start_watchdog;
7863 
7864 	return 0;
7865 
7866 fail_start_watchdog:
7867 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7868 		del_timer_sync(&instance->sriov_heartbeat_timer);
7869 fail_init_mfi:
7870 	megasas_free_ctrl_dma_buffers(instance);
7871 	megasas_free_ctrl_mem(instance);
7872 	scsi_host_put(host);
7873 
7874 fail_reenable_msix:
7875 fail_set_dma_mask:
7876 fail_ready_state:
7877 
7878 	pci_disable_device(pdev);
7879 
7880 	return -ENODEV;
7881 }
7882 #else
7883 #define megasas_suspend	NULL
7884 #define megasas_resume	NULL
7885 #endif
7886 
7887 static inline int
7888 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7889 {
7890 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7891 	int i;
7892 	u8 adp_state;
7893 
7894 	for (i = 0; i < wait_time; i++) {
7895 		adp_state = atomic_read(&instance->adprecovery);
7896 		if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7897 		    (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7898 			break;
7899 
7900 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7901 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7902 
7903 		msleep(1000);
7904 	}
7905 
7906 	if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7907 		dev_info(&instance->pdev->dev,
7908 			 "%s HBA failed to become operational, adp_state %d\n",
7909 			 __func__, adp_state);
7910 		return 1;
7911 	}
7912 
7913 	return 0;
7914 }
7915 
7916 /**
7917  * megasas_detach_one -	PCI hot"un"plug entry point
7918  * @pdev:		PCI device structure
7919  */
7920 static void megasas_detach_one(struct pci_dev *pdev)
7921 {
7922 	int i;
7923 	struct Scsi_Host *host;
7924 	struct megasas_instance *instance;
7925 	struct fusion_context *fusion;
7926 	u32 pd_seq_map_sz;
7927 
7928 	instance = pci_get_drvdata(pdev);
7929 
7930 	if (!instance)
7931 		return;
7932 
7933 	host = instance->host;
7934 	fusion = instance->ctrl_context;
7935 
7936 	/* Shutdown SR-IOV heartbeat timer */
7937 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7938 		del_timer_sync(&instance->sriov_heartbeat_timer);
7939 
7940 	/* Stop the FW fault detection watchdog */
7941 	if (instance->adapter_type != MFI_SERIES)
7942 		megasas_fusion_stop_watchdog(instance);
7943 
7944 	if (instance->fw_crash_state != UNAVAILABLE)
7945 		megasas_free_host_crash_buffer(instance);
7946 	scsi_remove_host(instance->host);
7947 	instance->unload = 1;
7948 
7949 	if (megasas_wait_for_adapter_operational(instance))
7950 		goto skip_firing_dcmds;
7951 
7952 	megasas_flush_cache(instance);
7953 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7954 
7955 skip_firing_dcmds:
7956 	/* cancel the delayed work if it is still queued */
7957 	if (instance->ev != NULL) {
7958 		struct megasas_aen_event *ev = instance->ev;
7959 		cancel_delayed_work_sync(&ev->hotplug_work);
7960 		instance->ev = NULL;
7961 	}
7962 
7963 	/* cancel all wait events */
7964 	wake_up_all(&instance->int_cmd_wait_q);
7965 
7966 	tasklet_kill(&instance->isr_tasklet);
7967 
7968 	/*
7969 	 * Take the instance off the instance array. Note that we will not
7970 	 * decrement the max_index. We let this array be a sparse array.
7971 	 */
7972 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7973 		if (megasas_mgmt_info.instance[i] == instance) {
7974 			megasas_mgmt_info.count--;
7975 			megasas_mgmt_info.instance[i] = NULL;
7976 
7977 			break;
7978 		}
7979 	}
7980 
7981 	instance->instancet->disable_intr(instance);
7982 
7983 	megasas_destroy_irqs(instance);
7984 
7985 	if (instance->msix_vectors)
7986 		pci_free_irq_vectors(instance->pdev);
7987 
7988 	if (instance->adapter_type >= VENTURA_SERIES) {
7989 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7990 			kfree(fusion->stream_detect_by_ld[i]);
7991 		kfree(fusion->stream_detect_by_ld);
7992 		fusion->stream_detect_by_ld = NULL;
7993 	}
7994 
7995 
7996 	if (instance->adapter_type != MFI_SERIES) {
7997 		megasas_release_fusion(instance);
7998 			pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
7999 				(sizeof(struct MR_PD_CFG_SEQ) *
8000 					(MAX_PHYSICAL_DEVICES - 1));
8001 		for (i = 0; i < 2 ; i++) {
8002 			if (fusion->ld_map[i])
8003 				dma_free_coherent(&instance->pdev->dev,
8004 						  fusion->max_map_sz,
8005 						  fusion->ld_map[i],
8006 						  fusion->ld_map_phys[i]);
8007 			if (fusion->ld_drv_map[i]) {
8008 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
8009 					vfree(fusion->ld_drv_map[i]);
8010 				else
8011 					free_pages((ulong)fusion->ld_drv_map[i],
8012 						   fusion->drv_map_pages);
8013 			}
8014 
8015 			if (fusion->pd_seq_sync[i])
8016 				dma_free_coherent(&instance->pdev->dev,
8017 					pd_seq_map_sz,
8018 					fusion->pd_seq_sync[i],
8019 					fusion->pd_seq_phys[i]);
8020 		}
8021 	} else {
8022 		megasas_release_mfi(instance);
8023 	}
8024 
8025 	if (instance->vf_affiliation)
8026 		dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
8027 				    sizeof(struct MR_LD_VF_AFFILIATION),
8028 				    instance->vf_affiliation,
8029 				    instance->vf_affiliation_h);
8030 
8031 	if (instance->vf_affiliation_111)
8032 		dma_free_coherent(&pdev->dev,
8033 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
8034 				    instance->vf_affiliation_111,
8035 				    instance->vf_affiliation_111_h);
8036 
8037 	if (instance->hb_host_mem)
8038 		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
8039 				    instance->hb_host_mem,
8040 				    instance->hb_host_mem_h);
8041 
8042 	megasas_free_ctrl_dma_buffers(instance);
8043 
8044 	megasas_free_ctrl_mem(instance);
8045 
8046 	megasas_destroy_debugfs(instance);
8047 
8048 	scsi_host_put(host);
8049 
8050 	pci_disable_device(pdev);
8051 }
8052 
8053 /**
8054  * megasas_shutdown -	Shutdown entry point
8055  * @pdev:		Generic device structure
8056  */
8057 static void megasas_shutdown(struct pci_dev *pdev)
8058 {
8059 	struct megasas_instance *instance = pci_get_drvdata(pdev);
8060 
8061 	if (!instance)
8062 		return;
8063 
8064 	instance->unload = 1;
8065 
8066 	if (megasas_wait_for_adapter_operational(instance))
8067 		goto skip_firing_dcmds;
8068 
8069 	megasas_flush_cache(instance);
8070 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
8071 
8072 skip_firing_dcmds:
8073 	instance->instancet->disable_intr(instance);
8074 	megasas_destroy_irqs(instance);
8075 
8076 	if (instance->msix_vectors)
8077 		pci_free_irq_vectors(instance->pdev);
8078 }
8079 
8080 /*
8081  * megasas_mgmt_open -	char node "open" entry point
8082  * @inode:	char node inode
8083  * @filep:	char node file
8084  */
8085 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
8086 {
8087 	/*
8088 	 * Allow only those users with admin rights
8089 	 */
8090 	if (!capable(CAP_SYS_ADMIN))
8091 		return -EACCES;
8092 
8093 	return 0;
8094 }
8095 
8096 /*
8097  * megasas_mgmt_fasync -	Async notifier registration from applications
8098  * @fd:		char node file descriptor number
8099  * @filep:	char node file
8100  * @mode:	notifier on/off
8101  *
8102  * This function adds the calling process to a driver global queue. When an
8103  * event occurs, SIGIO will be sent to all processes in this queue.
8104  */
8105 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
8106 {
8107 	int rc;
8108 
8109 	mutex_lock(&megasas_async_queue_mutex);
8110 
8111 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
8112 
8113 	mutex_unlock(&megasas_async_queue_mutex);
8114 
8115 	if (rc >= 0) {
8116 		/* For sanity check when we get ioctl */
8117 		filep->private_data = filep;
8118 		return 0;
8119 	}
8120 
8121 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
8122 
8123 	return rc;
8124 }
8125 
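/*
 * Editor's note: a self-contained user-space sketch (not driver code) of the
 * SIGIO registration path described above.  The device node name
 * "/dev/megaraid_sas_ioctl" is an assumption; adjust it for your system.
 * Opening the node requires CAP_SYS_ADMIN (see megasas_mgmt_open()).
 */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void aen_sigio_handler(int sig)
{
	(void)sig;
	/* An AEN arrived; a real tool would now read/clear events via ioctl. */
	write(STDOUT_FILENO, "AEN signalled\n", 14);
}

int main(void)
{
	int fd = open("/dev/megaraid_sas_ioctl", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	signal(SIGIO, aen_sigio_handler);
	/* Direct SIGIO to this process, then enable async notification,
	 * which invokes the driver's fasync handler. */
	fcntl(fd, F_SETOWN, getpid());
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

	pause();		/* wait for the driver to raise SIGIO */
	close(fd);
	return 0;
}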
8126 /*
8127  * megasas_mgmt_poll -  char node "poll" entry point
8128  * @filep:	char node file
8129  * @wait:	Events to poll for
8130  */
8131 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
8132 {
8133 	__poll_t mask;
8134 	unsigned long flags;
8135 
8136 	poll_wait(file, &megasas_poll_wait, wait);
8137 	spin_lock_irqsave(&poll_aen_lock, flags);
8138 	if (megasas_poll_wait_aen)
8139 		mask = (EPOLLIN | EPOLLRDNORM);
8140 	else
8141 		mask = 0;
8142 	megasas_poll_wait_aen = 0;
8143 	spin_unlock_irqrestore(&poll_aen_lock, flags);
8144 	return mask;
8145 }
8146 
8147 /*
8148  * megasas_set_crash_dump_params_ioctl:
8149  *		Send CRASH_DUMP_MODE DCMD to all controllers
8150  * @cmd:	MFI command frame
8151  */
8152 
8153 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
8154 {
8155 	struct megasas_instance *local_instance;
8156 	int i, error = 0;
8157 	int crash_support;
8158 
8159 	crash_support = cmd->frame->dcmd.mbox.w[0];
8160 
8161 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
8162 		local_instance = megasas_mgmt_info.instance[i];
8163 		if (local_instance && local_instance->crash_dump_drv_support) {
8164 			if ((atomic_read(&local_instance->adprecovery) ==
8165 				MEGASAS_HBA_OPERATIONAL) &&
8166 				!megasas_set_crash_dump_params(local_instance,
8167 					crash_support)) {
8168 				local_instance->crash_dump_app_support =
8169 					crash_support;
8170 				dev_info(&local_instance->pdev->dev,
8171 					"Application firmware crash "
8172 					"dump mode set success\n");
8173 				error = 0;
8174 			} else {
8175 				dev_info(&local_instance->pdev->dev,
8176 					"Application firmware crash "
8177 					"dump mode set failed\n");
8178 				error = -1;
8179 			}
8180 		}
8181 	}
8182 	return error;
8183 }
8184 
8185 /**
8186  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
8187  * @instance:			Adapter soft state
8188  * @user_ioc:			User's ioctl packet
8189  * @ioc:			ioctl packet
8190  */
8191 static int
8192 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
8193 		      struct megasas_iocpacket __user * user_ioc,
8194 		      struct megasas_iocpacket *ioc)
8195 {
8196 	struct megasas_sge64 *kern_sge64 = NULL;
8197 	struct megasas_sge32 *kern_sge32 = NULL;
8198 	struct megasas_cmd *cmd;
8199 	void *kbuff_arr[MAX_IOCTL_SGE];
8200 	dma_addr_t buf_handle = 0;
8201 	int error = 0, i;
8202 	void *sense = NULL;
8203 	dma_addr_t sense_handle;
8204 	void *sense_ptr;
8205 	u32 opcode = 0;
8206 	int ret = DCMD_SUCCESS;
8207 
8208 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
8209 
8210 	if (ioc->sge_count > MAX_IOCTL_SGE) {
8211 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
8212 		       ioc->sge_count, MAX_IOCTL_SGE);
8213 		return -EINVAL;
8214 	}
8215 
8216 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
8217 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
8218 	    !instance->support_nvme_passthru) ||
8219 	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
8220 	    !instance->support_pci_lane_margining)) {
8221 		dev_err(&instance->pdev->dev,
8222 			"Received invalid ioctl command 0x%x\n",
8223 			ioc->frame.hdr.cmd);
8224 		return -ENOTSUPP;
8225 	}
8226 
8227 	cmd = megasas_get_cmd(instance);
8228 	if (!cmd) {
8229 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
8230 		return -ENOMEM;
8231 	}
8232 
8233 	/*
8234 	 * User's IOCTL packet has 2 frames (maximum). Copy those two
8235 	 * frames into our cmd's frames. cmd->frame's context will get
8236 	 * overwritten when we copy from user's frames. So set that value
8237 	 * alone separately
8238 	 */
8239 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
8240 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
8241 	cmd->frame->hdr.pad_0 = 0;
8242 
8243 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8244 
8245 	if (instance->consistent_mask_64bit)
8246 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8247 				       MFI_FRAME_SENSE64));
8248 	else
8249 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8250 					       MFI_FRAME_SENSE64));
8251 
8252 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8253 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8254 
8255 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8256 		mutex_lock(&instance->reset_mutex);
8257 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8258 			megasas_return_cmd(instance, cmd);
8259 			mutex_unlock(&instance->reset_mutex);
8260 			return -1;
8261 		}
8262 		mutex_unlock(&instance->reset_mutex);
8263 	}
8264 
8265 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8266 		error = megasas_set_crash_dump_params_ioctl(cmd);
8267 		megasas_return_cmd(instance, cmd);
8268 		return error;
8269 	}
8270 
8271 	/*
8272 	 * The management interface between applications and the fw uses
8273 	 * MFI frames. E.g, RAID configuration changes, LD property changes
8274 	 * etc. are accomplished through different kinds of MFI frames. The
8275 	 * driver needs to care only about substituting user buffers with
8276 	 * kernel buffers in SGLs. The location of SGL is embedded in the
8277 	 * struct iocpacket itself.
8278 	 */
8279 	if (instance->consistent_mask_64bit)
8280 		kern_sge64 = (struct megasas_sge64 *)
8281 			((unsigned long)cmd->frame + ioc->sgl_off);
8282 	else
8283 		kern_sge32 = (struct megasas_sge32 *)
8284 			((unsigned long)cmd->frame + ioc->sgl_off);
8285 
8286 	/*
8287 	 * For each user buffer, create a mirror buffer and copy in
8288 	 */
8289 	for (i = 0; i < ioc->sge_count; i++) {
8290 		if (!ioc->sgl[i].iov_len)
8291 			continue;
8292 
8293 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8294 						    ioc->sgl[i].iov_len,
8295 						    &buf_handle, GFP_KERNEL);
8296 		if (!kbuff_arr[i]) {
8297 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8298 			       "kernel SGL buffer for IOCTL\n");
8299 			error = -ENOMEM;
8300 			goto out;
8301 		}
8302 
8303 		/*
8304 		 * We don't change the dma_coherent_mask, so
8305 		 * dma_alloc_coherent only returns 32bit addresses
8306 		 */
8307 		if (instance->consistent_mask_64bit) {
8308 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8309 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8310 		} else {
8311 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8312 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8313 		}
8314 
8315 		/*
8316 		 * We created a kernel buffer corresponding to the
8317 		 * user buffer. Now copy in from the user buffer
8318 		 */
8319 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8320 				   (u32) (ioc->sgl[i].iov_len))) {
8321 			error = -EFAULT;
8322 			goto out;
8323 		}
8324 	}
8325 
8326 	if (ioc->sense_len) {
8327 		/* make sure the pointer is part of the frame */
8328 		if (ioc->sense_off >
8329 		    (sizeof(union megasas_frame) - sizeof(__le64))) {
8330 			error = -EINVAL;
8331 			goto out;
8332 		}
8333 
8334 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8335 					     &sense_handle, GFP_KERNEL);
8336 		if (!sense) {
8337 			error = -ENOMEM;
8338 			goto out;
8339 		}
8340 
8341 		/* always store 64 bits regardless of addressing */
8342 		sense_ptr = (void *)cmd->frame + ioc->sense_off;
8343 		put_unaligned_le64(sense_handle, sense_ptr);
8344 	}
8345 
8346 	/*
8347 	 * Set the sync_cmd flag so that the ISR knows not to complete this
8348 	 * cmd to the SCSI mid-layer
8349 	 */
8350 	cmd->sync_cmd = 1;
8351 
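	/*
	 * megasas_issue_blocked_cmd() sleeps until the firmware completes
	 * the frame (called here without a timeout). DCMD_INIT/DCMD_BUSY
	 * indicate the command never reached a completed state, e.g.
	 * because the adapter is being reset, so report -EBUSY and let the
	 * application retry.
	 */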
8352 	ret = megasas_issue_blocked_cmd(instance, cmd, 0);
8353 	switch (ret) {
8354 	case DCMD_INIT:
8355 	case DCMD_BUSY:
8356 		cmd->sync_cmd = 0;
8357 		dev_err(&instance->pdev->dev,
8358 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8359 			 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8360 			 cmd->cmd_status_drv);
8361 		error = -EBUSY;
8362 		goto out;
8363 	}
8364 
8365 	cmd->sync_cmd = 0;
8366 
8367 	if (instance->unload == 1) {
8368 		dev_info(&instance->pdev->dev, "Driver unload is in progress; "
8369 			"don't submit data to application\n");
8370 		goto out;
8371 	}
8372 	/*
8373 	 * copy out the kernel buffers to user buffers
8374 	 */
8375 	for (i = 0; i < ioc->sge_count; i++) {
8376 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8377 				 ioc->sgl[i].iov_len)) {
8378 			error = -EFAULT;
8379 			goto out;
8380 		}
8381 	}
8382 
8383 	/*
8384 	 * copy out the sense
8385 	 */
8386 	if (ioc->sense_len) {
8387 		/*
8388 		 * sense_ptr points to the location that has the user
8389 		 * sense buffer address
8390 		 */
8391 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
8392 				ioc->sense_off);
8393 
8394 		if (copy_to_user((void __user *)((unsigned long)
8395 				 get_unaligned((unsigned long *)sense_ptr)),
8396 				 sense, ioc->sense_len)) {
8397 			dev_err(&instance->pdev->dev, "Failed to copy out to user "
8398 					"sense data\n");
8399 			error = -EFAULT;
8400 			goto out;
8401 		}
8402 	}
8403 
8404 	/*
8405 	 * copy the status codes returned by the fw
8406 	 */
8407 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8408 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8409 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8410 		error = -EFAULT;
8411 	}
8412 
8413 out:
8414 	if (sense) {
8415 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8416 				    sense, sense_handle);
8417 	}
8418 
8419 	for (i = 0; i < ioc->sge_count; i++) {
8420 		if (kbuff_arr[i]) {
8421 			if (instance->consistent_mask_64bit)
8422 				dma_free_coherent(&instance->pdev->dev,
8423 					le32_to_cpu(kern_sge64[i].length),
8424 					kbuff_arr[i],
8425 					le64_to_cpu(kern_sge64[i].phys_addr));
8426 			else
8427 				dma_free_coherent(&instance->pdev->dev,
8428 					le32_to_cpu(kern_sge32[i].length),
8429 					kbuff_arr[i],
8430 					le32_to_cpu(kern_sge32[i].phys_addr));
8431 			kbuff_arr[i] = NULL;
8432 		}
8433 	}
8434 
8435 	megasas_return_cmd(instance, cmd);
8436 	return error;
8437 }
8438 
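/*
 * Illustrative user-space call sequence (an example/assumption for
 * documentation, not taken from this driver): a management application
 * opens the character node created from the dynamically allocated major
 * registered in megasas_init() and issues MEGASAS_IOC_FIRMWARE with a
 * populated struct megasas_iocpacket:
 *
 *	fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR); // node name is an example
 *	ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc);
 */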
8439 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8440 {
8441 	struct megasas_iocpacket __user *user_ioc =
8442 	    (struct megasas_iocpacket __user *)arg;
8443 	struct megasas_iocpacket *ioc;
8444 	struct megasas_instance *instance;
8445 	int error;
8446 
8447 	ioc = memdup_user(user_ioc, sizeof(*ioc));
8448 	if (IS_ERR(ioc))
8449 		return PTR_ERR(ioc);
8450 
8451 	instance = megasas_lookup_instance(ioc->host_no);
8452 	if (!instance) {
8453 		error = -ENODEV;
8454 		goto out_kfree_ioc;
8455 	}
8456 
8457 	/* Block ioctls in VF mode */
8458 	if (instance->requestorId && !allow_vf_ioctls) {
8459 		error = -ENODEV;
8460 		goto out_kfree_ioc;
8461 	}
8462 
8463 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8464 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
8465 		error = -ENODEV;
8466 		goto out_kfree_ioc;
8467 	}
8468 
8469 	if (instance->unload == 1) {
8470 		error = -ENODEV;
8471 		goto out_kfree_ioc;
8472 	}
8473 
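	/*
	 * ioctl_sem bounds the number of management ioctls outstanding at
	 * once; down_interruptible() lets a pending signal abort the wait
	 * with -ERESTARTSYS instead of blocking indefinitely.
	 */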
8474 	if (down_interruptible(&instance->ioctl_sem)) {
8475 		error = -ERESTARTSYS;
8476 		goto out_kfree_ioc;
8477 	}
8478 
8479 	if  (megasas_wait_for_adapter_operational(instance)) {
8480 		error = -ENODEV;
8481 		goto out_up;
8482 	}
8483 
8484 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8485 out_up:
8486 	up(&instance->ioctl_sem);
8487 
8488 out_kfree_ioc:
8489 	kfree(ioc);
8490 	return error;
8491 }
8492 
8493 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8494 {
8495 	struct megasas_instance *instance;
8496 	struct megasas_aen aen;
8497 	int error;
8498 
8499 	if (file->private_data != file) {
8500 		printk(KERN_DEBUG "megasas: fasync_helper was not "
8501 		       "called first\n");
8502 		return -EINVAL;
8503 	}
8504 
8505 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8506 		return -EFAULT;
8507 
8508 	instance = megasas_lookup_instance(aen.host_no);
8509 
8510 	if (!instance)
8511 		return -ENODEV;
8512 
8513 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8514 		return -ENODEV;
8515 	}
8516 
8517 	if (instance->unload == 1) {
8518 		return -ENODEV;
8519 	}
8520 
8521 	if  (megasas_wait_for_adapter_operational(instance))
8522 		return -ENODEV;
8523 
8524 	mutex_lock(&instance->reset_mutex);
8525 	error = megasas_register_aen(instance, aen.seq_num,
8526 				     aen.class_locale_word);
8527 	mutex_unlock(&instance->reset_mutex);
8528 	return error;
8529 }
8530 
8531 /**
8532  * megasas_mgmt_ioctl -	char node ioctl entry point
8533  * @file:	char device file pointer
8534  * @cmd:	ioctl command
8535  * @arg:	ioctl command arguments address
8536  */
8537 static long
8538 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8539 {
8540 	switch (cmd) {
8541 	case MEGASAS_IOC_FIRMWARE:
8542 		return megasas_mgmt_ioctl_fw(file, arg);
8543 
8544 	case MEGASAS_IOC_GET_AEN:
8545 		return megasas_mgmt_ioctl_aen(file, arg);
8546 	}
8547 
8548 	return -ENOTTY;
8549 }
8550 
8551 #ifdef CONFIG_COMPAT
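/*
 * 32-bit user space passes struct compat_megasas_iocpacket, whose pointer
 * fields are narrower than in the native layout. Repack it field by field
 * into a native struct megasas_iocpacket (allocated on the user stack via
 * compat_alloc_user_space()) so that megasas_mgmt_ioctl_fw() can be reused
 * unchanged.
 */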
8552 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
8553 {
8554 	struct compat_megasas_iocpacket __user *cioc =
8555 	    (struct compat_megasas_iocpacket __user *)arg;
8556 	struct megasas_iocpacket __user *ioc =
8557 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
8558 	int i;
8559 	int error = 0;
8560 	compat_uptr_t ptr;
8561 	u32 local_sense_off;
8562 	u32 local_sense_len;
8563 	u32 user_sense_off;
8564 
8565 	if (clear_user(ioc, sizeof(*ioc)))
8566 		return -EFAULT;
8567 
8568 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
8569 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
8570 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
8571 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
8572 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
8573 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
8574 		return -EFAULT;
8575 
8576 	/*
8577 	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
8578 	 * sense_len is non-zero, so prepare the 64-bit value under
8579 	 * the same condition.
8580 	 */
8581 	if (get_user(local_sense_off, &ioc->sense_off) ||
8582 		get_user(local_sense_len, &ioc->sense_len) ||
8583 		get_user(user_sense_off, &cioc->sense_off))
8584 		return -EFAULT;
8585 
8586 	if (local_sense_off != user_sense_off)
8587 		return -EINVAL;
8588 
8589 	if (local_sense_len) {
8590 		void __user **sense_ioc_ptr =
8591 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
8592 		compat_uptr_t *sense_cioc_ptr =
8593 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
8594 		if (get_user(ptr, sense_cioc_ptr) ||
8595 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
8596 			return -EFAULT;
8597 	}
8598 
8599 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
8600 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
8601 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
8602 		    copy_in_user(&ioc->sgl[i].iov_len,
8603 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
8604 			return -EFAULT;
8605 	}
8606 
8607 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
8608 
8609 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
8610 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
8611 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
8612 		return -EFAULT;
8613 	}
8614 	return error;
8615 }
8616 
8617 static long
8618 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8619 			  unsigned long arg)
8620 {
8621 	switch (cmd) {
8622 	case MEGASAS_IOC_FIRMWARE32:
8623 		return megasas_mgmt_compat_ioctl_fw(file, arg);
8624 	case MEGASAS_IOC_GET_AEN:
8625 		return megasas_mgmt_ioctl_aen(file, arg);
8626 	}
8627 
8628 	return -ENOTTY;
8629 }
8630 #endif
8631 
8632 /*
8633  * File operations structure for management interface
8634  */
8635 static const struct file_operations megasas_mgmt_fops = {
8636 	.owner = THIS_MODULE,
8637 	.open = megasas_mgmt_open,
8638 	.fasync = megasas_mgmt_fasync,
8639 	.unlocked_ioctl = megasas_mgmt_ioctl,
8640 	.poll = megasas_mgmt_poll,
8641 #ifdef CONFIG_COMPAT
8642 	.compat_ioctl = megasas_mgmt_compat_ioctl,
8643 #endif
8644 	.llseek = noop_llseek,
8645 };
8646 
8647 /*
8648  * PCI hotplug support registration structure
8649  */
8650 static struct pci_driver megasas_pci_driver = {
8652 	.name = "megaraid_sas",
8653 	.id_table = megasas_pci_table,
8654 	.probe = megasas_probe_one,
8655 	.remove = megasas_detach_one,
8656 	.suspend = megasas_suspend,
8657 	.resume = megasas_resume,
8658 	.shutdown = megasas_shutdown,
8659 };
8660 
8661 /*
8662  * Sysfs driver attributes
8663  */
8664 static ssize_t version_show(struct device_driver *dd, char *buf)
8665 {
8666 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8667 			MEGASAS_VERSION);
8668 }
8669 static DRIVER_ATTR_RO(version);
8670 
8671 static ssize_t release_date_show(struct device_driver *dd, char *buf)
8672 {
8673 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8674 		MEGASAS_RELDATE);
8675 }
8676 static DRIVER_ATTR_RO(release_date);
8677 
8678 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8679 {
8680 	return sprintf(buf, "%u\n", support_poll_for_event);
8681 }
8682 static DRIVER_ATTR_RO(support_poll_for_event);
8683 
8684 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8685 {
8686 	return sprintf(buf, "%u\n", support_device_change);
8687 }
8688 static DRIVER_ATTR_RO(support_device_change);
8689 
8690 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8691 {
8692 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
8693 }
8694 
8695 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8696 			     size_t count)
8697 {
8698 	int retval = count;
8699 
8700 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8701 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8702 		retval = -EINVAL;
8703 	}
8704 	return retval;
8705 }
8706 static DRIVER_ATTR_RW(dbg_lvl);
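/*
 * Example usage from user space (the path is the standard sysfs location
 * for PCI driver attributes and is shown only as an illustration):
 *
 *	cat /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 *	echo 1 > /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 */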
8707 
8708 static ssize_t
8709 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8710 {
8711 	return sprintf(buf, "%u\n", support_nvme_encapsulation);
8712 }
8713 
8714 static DRIVER_ATTR_RO(support_nvme_encapsulation);
8715 
8716 static ssize_t
8717 support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8718 {
8719 	return sprintf(buf, "%u\n", support_pci_lane_margining);
8720 }
8721 
8722 static DRIVER_ATTR_RO(support_pci_lane_margining);
8723 
8724 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8725 {
8726 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8727 	scsi_remove_device(sdev);
8728 	scsi_device_put(sdev);
8729 }
8730 
8731 /**
8732  * megasas_update_device_list -	Update the PD and LD device list from FW
8733  *				after an AEN event notification
8734  * @instance:			Adapter soft state
8735  * @event_type:			Indicates type of event (PD or LD event)
8736  *
8737  * Return:			Success or failure
8738  *
8739  * Issue DCMDs to Firmware to update the internal device list in driver.
8740  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8741  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8742  */
8743 static
8744 int megasas_update_device_list(struct megasas_instance *instance,
8745 			       int event_type)
8746 {
8747 	int dcmd_ret = DCMD_SUCCESS;
8748 
8749 	if (instance->enable_fw_dev_list) {
8750 		dcmd_ret = megasas_host_device_list_query(instance, false);
8751 		if (dcmd_ret != DCMD_SUCCESS)
8752 			goto out;
8753 	} else {
8754 		if (event_type & SCAN_PD_CHANNEL) {
8755 			dcmd_ret = megasas_get_pd_list(instance);
8756 
8757 			if (dcmd_ret != DCMD_SUCCESS)
8758 				goto out;
8759 		}
8760 
8761 		if (event_type & SCAN_VD_CHANNEL) {
8762 			if (!instance->requestorId ||
8763 			    (instance->requestorId &&
8764 			     megasas_get_ld_vf_affiliation(instance, 0))) {
8765 				dcmd_ret = megasas_ld_list_query(instance,
8766 						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8767 				if (dcmd_ret != DCMD_SUCCESS)
8768 					goto out;
8769 			}
8770 		}
8771 	}
8772 
8773 out:
8774 	return dcmd_ret;
8775 }
8776 
8777 /**
8778  * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
8779  *				after an AEN event notification
8780  * @instance:			Adapter soft state
8781  * @scan_type:			Indicates type of devices (PD/LD) to add
8782  * Return:			void
8783  */
8784 static
8785 void megasas_add_remove_devices(struct megasas_instance *instance,
8786 				int scan_type)
8787 {
8788 	int i, j;
8789 	u16 pd_index = 0;
8790 	u16 ld_index = 0;
8791 	u16 channel = 0, id = 0;
8792 	struct Scsi_Host *host;
8793 	struct scsi_device *sdev1;
8794 	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8795 	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8796 
8797 	host = instance->host;
8798 
8799 	if (instance->enable_fw_dev_list) {
8800 		targetid_list = instance->host_device_list_buf;
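		/*
		 * Translate each firmware-reported target_id into a
		 * (channel, id) pair: system PDs use the first
		 * MEGASAS_MAX_PD_CHANNELS channels, LDs the channels after
		 * them, and within a channel the id is
		 * target_id % MEGASAS_MAX_DEV_PER_CHANNEL.
		 */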
8801 		for (i = 0; i < targetid_list->count; i++) {
8802 			targetid_entry = &targetid_list->host_device_list[i];
8803 			if (targetid_entry->flags.u.bits.is_sys_pd) {
8804 				channel = le16_to_cpu(targetid_entry->target_id) /
8805 						MEGASAS_MAX_DEV_PER_CHANNEL;
8806 				id = le16_to_cpu(targetid_entry->target_id) %
8807 						MEGASAS_MAX_DEV_PER_CHANNEL;
8808 			} else {
8809 				channel = MEGASAS_MAX_PD_CHANNELS +
8810 					  (le16_to_cpu(targetid_entry->target_id) /
8811 					   MEGASAS_MAX_DEV_PER_CHANNEL);
8812 				id = le16_to_cpu(targetid_entry->target_id) %
8813 						MEGASAS_MAX_DEV_PER_CHANNEL;
8814 			}
8815 			sdev1 = scsi_device_lookup(host, channel, id, 0);
8816 			if (!sdev1) {
8817 				scsi_add_device(host, channel, id, 0);
8818 			} else {
8819 				scsi_device_put(sdev1);
8820 			}
8821 		}
8822 	}
8823 
8824 	if (scan_type & SCAN_PD_CHANNEL) {
8825 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8826 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8827 				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8828 				sdev1 = scsi_device_lookup(host, i, j, 0);
8829 				if (instance->pd_list[pd_index].driveState ==
8830 							MR_PD_STATE_SYSTEM) {
8831 					if (!sdev1)
8832 						scsi_add_device(host, i, j, 0);
8833 					else
8834 						scsi_device_put(sdev1);
8835 				} else {
8836 					if (sdev1)
8837 						megasas_remove_scsi_device(sdev1);
8838 				}
8839 			}
8840 		}
8841 	}
8842 
8843 	if (scan_type & SCAN_VD_CHANNEL) {
8844 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
8845 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8846 				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
8847 				sdev1 = scsi_device_lookup(host,
8848 						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8849 				if (instance->ld_ids[ld_index] != 0xff) {
8850 					if (!sdev1)
8851 						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8852 					else
8853 						scsi_device_put(sdev1);
8854 				} else {
8855 					if (sdev1)
8856 						megasas_remove_scsi_device(sdev1);
8857 				}
8858 			}
8859 		}
8860 	}
8861 
8862 }
8863 
8864 static void
8865 megasas_aen_polling(struct work_struct *work)
8866 {
8867 	struct megasas_aen_event *ev =
8868 		container_of(work, struct megasas_aen_event, hotplug_work.work);
8869 	struct megasas_instance *instance = ev->instance;
8870 	union megasas_evt_class_locale class_locale;
8871 	int event_type = 0;
8872 	u32 seq_num;
8873 	u16 ld_target_id;
8874 	int error;
8875 	u8  dcmd_ret = DCMD_SUCCESS;
8876 	struct scsi_device *sdev1;
8877 
8878 	if (!instance) {
8879 		printk(KERN_ERR "invalid instance!\n");
8880 		kfree(ev);
8881 		return;
8882 	}
8883 
8884 	/* Don't run the event workqueue thread if OCR is running */
8885 	mutex_lock(&instance->reset_mutex);
8886 
8887 	instance->ev = NULL;
8888 	if (instance->evt_detail) {
8889 		megasas_decode_evt(instance);
8890 
8891 		switch (le32_to_cpu(instance->evt_detail->code)) {
8892 
8893 		case MR_EVT_PD_INSERTED:
8894 		case MR_EVT_PD_REMOVED:
8895 			event_type = SCAN_PD_CHANNEL;
8896 			break;
8897 
8898 		case MR_EVT_LD_OFFLINE:
8899 		case MR_EVT_LD_DELETED:
8900 			ld_target_id = instance->evt_detail->args.ld.target_id;
8901 			sdev1 = scsi_device_lookup(instance->host,
8902 						   MEGASAS_MAX_PD_CHANNELS +
8903 						   (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
8904 						   (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL),
8905 						   0);
8906 			if (sdev1)
8907 				megasas_remove_scsi_device(sdev1);
8908 
8909 			event_type = SCAN_VD_CHANNEL;
8910 			break;
8911 		case MR_EVT_LD_CREATED:
8912 			event_type = SCAN_VD_CHANNEL;
8913 			break;
8914 
8915 		case MR_EVT_CFG_CLEARED:
8916 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
8917 		case MR_EVT_FOREIGN_CFG_IMPORTED:
8918 		case MR_EVT_LD_STATE_CHANGE:
8919 			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
8920 			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
8921 				instance->host->host_no);
8922 			break;
8923 
8924 		case MR_EVT_CTRL_PROP_CHANGED:
8925 			dcmd_ret = megasas_get_ctrl_info(instance);
8926 			if (dcmd_ret == DCMD_SUCCESS &&
8927 			    instance->snapdump_wait_time) {
8928 				megasas_get_snapdump_properties(instance);
8929 				dev_info(&instance->pdev->dev,
8930 					 "Snap dump wait time\t: %d\n",
8931 					 instance->snapdump_wait_time);
8932 			}
8933 			break;
8934 		default:
8935 			event_type = 0;
8936 			break;
8937 		}
8938 	} else {
8939 		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
8940 		mutex_unlock(&instance->reset_mutex);
8941 		kfree(ev);
8942 		return;
8943 	}
8944 
8945 	if (event_type)
8946 		dcmd_ret = megasas_update_device_list(instance, event_type);
8947 
8948 	mutex_unlock(&instance->reset_mutex);
8949 
8950 	if (event_type && dcmd_ret == DCMD_SUCCESS)
8951 		megasas_add_remove_devices(instance, event_type);
8952 
8953 	if (dcmd_ret == DCMD_SUCCESS)
8954 		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
8955 	else
8956 		seq_num = instance->last_seq_num;
8957 
8958 	/* Register AEN with FW for latest sequence number plus 1 */
8959 	class_locale.members.reserved = 0;
8960 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
8961 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
8962 
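	/*
	 * If an AEN command is already registered with the firmware there
	 * is no need to register another one here.
	 */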
8963 	if (instance->aen_cmd != NULL) {
8964 		kfree(ev);
8965 		return;
8966 	}
8967 
8968 	mutex_lock(&instance->reset_mutex);
8969 	error = megasas_register_aen(instance, seq_num,
8970 					class_locale.word);
8971 	if (error)
8972 		dev_err(&instance->pdev->dev,
8973 			"register aen failed error %x\n", error);
8974 
8975 	mutex_unlock(&instance->reset_mutex);
8976 	kfree(ev);
8977 }
8978 
8979 /**
8980  * megasas_init - Driver load entry point
8981  */
8982 static int __init megasas_init(void)
8983 {
8984 	int rval;
8985 
8986 	/*
8987 	 * When booted into a kdump kernel, minimize the memory footprint
8988 	 * by disabling a few features.
8989 	 */
8990 	if (reset_devices) {
8991 		msix_vectors = 1;
8992 		rdpq_enable = 0;
8993 		dual_qdepth_disable = 1;
8994 	}
8995 
8996 	/*
8997 	 * Announce driver version and other information
8998 	 */
8999 	pr_info("megasas: %s\n", MEGASAS_VERSION);
9000 
9001 	spin_lock_init(&poll_aen_lock);
9002 
9003 	support_poll_for_event = 2;
9004 	support_device_change = 1;
9005 	support_nvme_encapsulation = true;
9006 	support_pci_lane_margining = true;
9007 
9008 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
9009 
9010 	/*
9011 	 * Register character device node
9012 	 */
9013 	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
9014 
9015 	if (rval < 0) {
9016 		printk(KERN_DEBUG "megasas: failed to register character device node\n");
9017 		return rval;
9018 	}
9019 
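	/*
	 * register_chrdev() was called with major 0, so on success rval is
	 * the dynamically allocated major number; keep it so the character
	 * device can be unregistered in megasas_exit().
	 */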
9020 	megasas_mgmt_majorno = rval;
9021 
9022 	megasas_init_debugfs();
9023 
9024 	/*
9025 	 * Register ourselves as PCI hotplug module
9026 	 */
9027 	rval = pci_register_driver(&megasas_pci_driver);
9028 
9029 	if (rval) {
9030 		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
9031 		goto err_pcidrv;
9032 	}
9033 
9034 	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
9035 	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
9036 		pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
9037 		event_log_level = MFI_EVT_CLASS_CRITICAL;
9038 	}
9039 
9040 	rval = driver_create_file(&megasas_pci_driver.driver,
9041 				  &driver_attr_version);
9042 	if (rval)
9043 		goto err_dcf_attr_ver;
9044 
9045 	rval = driver_create_file(&megasas_pci_driver.driver,
9046 				  &driver_attr_release_date);
9047 	if (rval)
9048 		goto err_dcf_rel_date;
9049 
9050 	rval = driver_create_file(&megasas_pci_driver.driver,
9051 				&driver_attr_support_poll_for_event);
9052 	if (rval)
9053 		goto err_dcf_support_poll_for_event;
9054 
9055 	rval = driver_create_file(&megasas_pci_driver.driver,
9056 				  &driver_attr_dbg_lvl);
9057 	if (rval)
9058 		goto err_dcf_dbg_lvl;
9059 	rval = driver_create_file(&megasas_pci_driver.driver,
9060 				&driver_attr_support_device_change);
9061 	if (rval)
9062 		goto err_dcf_support_device_change;
9063 
9064 	rval = driver_create_file(&megasas_pci_driver.driver,
9065 				  &driver_attr_support_nvme_encapsulation);
9066 	if (rval)
9067 		goto err_dcf_support_nvme_encapsulation;
9068 
9069 	rval = driver_create_file(&megasas_pci_driver.driver,
9070 				  &driver_attr_support_pci_lane_margining);
9071 	if (rval)
9072 		goto err_dcf_support_pci_lane_margining;
9073 
9074 	return rval;
9075 
9076 err_dcf_support_pci_lane_margining:
9077 	driver_remove_file(&megasas_pci_driver.driver,
9078 			   &driver_attr_support_nvme_encapsulation);
9079 
9080 err_dcf_support_nvme_encapsulation:
9081 	driver_remove_file(&megasas_pci_driver.driver,
9082 			   &driver_attr_support_device_change);
9083 
9084 err_dcf_support_device_change:
9085 	driver_remove_file(&megasas_pci_driver.driver,
9086 			   &driver_attr_dbg_lvl);
9087 err_dcf_dbg_lvl:
9088 	driver_remove_file(&megasas_pci_driver.driver,
9089 			&driver_attr_support_poll_for_event);
9090 err_dcf_support_poll_for_event:
9091 	driver_remove_file(&megasas_pci_driver.driver,
9092 			   &driver_attr_release_date);
9093 err_dcf_rel_date:
9094 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
9095 err_dcf_attr_ver:
9096 	pci_unregister_driver(&megasas_pci_driver);
9097 err_pcidrv:
9098 	megasas_exit_debugfs();
9099 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
9100 	return rval;
9101 }
9102 
9103 /**
9104  * megasas_exit - Driver unload entry point
9105  */
9106 static void __exit megasas_exit(void)
9107 {
9108 	driver_remove_file(&megasas_pci_driver.driver,
9109 			   &driver_attr_dbg_lvl);
9110 	driver_remove_file(&megasas_pci_driver.driver,
9111 			&driver_attr_support_poll_for_event);
9112 	driver_remove_file(&megasas_pci_driver.driver,
9113 			&driver_attr_support_device_change);
9114 	driver_remove_file(&megasas_pci_driver.driver,
9115 			   &driver_attr_release_date);
9116 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
9117 	driver_remove_file(&megasas_pci_driver.driver,
9118 			   &driver_attr_support_nvme_encapsulation);
9119 	driver_remove_file(&megasas_pci_driver.driver,
9120 			   &driver_attr_support_pci_lane_margining);
9121 
9122 	pci_unregister_driver(&megasas_pci_driver);
9123 	megasas_exit_debugfs();
9124 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
9125 }
9126 
9127 module_init(megasas_init);
9128 module_exit(megasas_exit);
9129