1 /*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Authors: Avago Technologies
21 * Sreenivas Bagalkote
22 * Sumant Patro
23 * Bo Yang
24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
27 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
47 #include <linux/fs.h>
48 #include <linux/compat.h>
49 #include <linux/blkdev.h>
50 #include <linux/mutex.h>
51 #include <linux/poll.h>
52 #include <linux/vmalloc.h>
53
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsi_tcq.h>
59 #include "megaraid_sas_fusion.h"
60 #include "megaraid_sas.h"
61
62 /*
63 * Number of sectors per IO command
64 * Will be set in megasas_init_mfi if user does not provide
65 */
66 static unsigned int max_sectors;
67 module_param_named(max_sectors, max_sectors, int, 0);
68 MODULE_PARM_DESC(max_sectors,
69 "Maximum number of sectors per IO command");
70
71 static int msix_disable;
72 module_param(msix_disable, int, S_IRUGO);
73 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
74
75 static unsigned int msix_vectors;
76 module_param(msix_vectors, int, S_IRUGO);
77 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
78
79 static int allow_vf_ioctls;
80 module_param(allow_vf_ioctls, int, S_IRUGO);
81 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
82
83 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
84 module_param(throttlequeuedepth, int, S_IRUGO);
85 MODULE_PARM_DESC(throttlequeuedepth,
86 "Adapter queue depth when throttled due to I/O timeout. Default: 16");
87
88 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
89 module_param(resetwaittime, int, S_IRUGO);
90 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
91 "before resetting adapter. Default: 180");
92
93 int smp_affinity_enable = 1;
94 module_param(smp_affinity_enable, int, S_IRUGO);
95 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
96
97 int rdpq_enable = 1;
98 module_param(rdpq_enable, int, S_IRUGO);
99 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
100
101 unsigned int dual_qdepth_disable;
102 module_param(dual_qdepth_disable, int, S_IRUGO);
103 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
104
105 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
106 module_param(scmd_timeout, int, S_IRUGO);
107 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
108
109 MODULE_LICENSE("GPL");
110 MODULE_VERSION(MEGASAS_VERSION);
111 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
112 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
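/*
 * Illustrative usage (not part of the original source): the module
 * parameters declared above can be set at load time, e.g.
 *
 *   modprobe megaraid_sas max_sectors=128 msix_disable=1 scmd_timeout=60
 *
 * The values shown are hypothetical examples, not recommended defaults.
 */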
113
114 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
115 static int megasas_get_pd_list(struct megasas_instance *instance);
116 static int megasas_ld_list_query(struct megasas_instance *instance,
117 u8 query_type);
118 static int megasas_issue_init_mfi(struct megasas_instance *instance);
119 static int megasas_register_aen(struct megasas_instance *instance,
120 u32 seq_num, u32 class_locale_word);
121 static void megasas_get_pd_info(struct megasas_instance *instance,
122 struct scsi_device *sdev);
123 static int megasas_get_target_prop(struct megasas_instance *instance,
124 struct scsi_device *sdev);
125 /*
126 * PCI ID table for all supported controllers
127 */
128 static struct pci_device_id megasas_pci_table[] = {
129
130 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
131 /* xscale IOP */
132 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
133 /* ppc IOP */
134 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
135 /* ppc IOP */
136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
137 /* gen2*/
138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
139 /* gen2*/
140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
141 /* skinny*/
142 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
143 /* skinny*/
144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
145 /* xscale IOP, vega */
146 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
147 /* xscale IOP */
148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
149 /* Fusion */
150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
151 /* Plasma */
152 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
153 /* Invader */
154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
155 /* Fury */
156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
157 /* Intruder */
158 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
159 /* Intruder 24 port*/
160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
161 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
162 /* VENTURA */
163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
165 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
167 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
168 {}
169 };
170
171 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
172
173 static int megasas_mgmt_majorno;
174 struct megasas_mgmt_info megasas_mgmt_info;
175 static struct fasync_struct *megasas_async_queue;
176 static DEFINE_MUTEX(megasas_async_queue_mutex);
177
178 static int megasas_poll_wait_aen;
179 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
180 static u32 support_poll_for_event;
181 u32 megasas_dbg_lvl;
182 static u32 support_device_change;
183
184 /* define lock for aen poll */
185 spinlock_t poll_aen_lock;
186
187 void
188 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
189 u8 alt_status);
190 static u32
191 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
192 static int
193 megasas_adp_reset_gen2(struct megasas_instance *instance,
194 struct megasas_register_set __iomem *reg_set);
195 static irqreturn_t megasas_isr(int irq, void *devp);
196 static u32
197 megasas_init_adapter_mfi(struct megasas_instance *instance);
198 u32
199 megasas_build_and_issue_cmd(struct megasas_instance *instance,
200 struct scsi_cmnd *scmd);
201 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
202 int
203 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
204 int seconds);
205 void megasas_fusion_ocr_wq(struct work_struct *work);
206 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
207 int initial);
208
209 void
210 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
211 {
212 instance->instancet->fire_cmd(instance,
213 cmd->frame_phys_addr, 0, instance->reg_set);
214 return;
215 }
216
217 /**
218 * megasas_get_cmd - Get a command from the free pool
219 * @instance: Adapter soft state
220 *
221 * Returns a free command from the pool, or NULL if the pool is empty
222 */
223 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
224 *instance)
225 {
226 unsigned long flags;
227 struct megasas_cmd *cmd = NULL;
228
229 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
230
231 if (!list_empty(&instance->cmd_pool)) {
232 cmd = list_entry((&instance->cmd_pool)->next,
233 struct megasas_cmd, list);
234 list_del_init(&cmd->list);
235 } else {
236 dev_err(&instance->pdev->dev, "Command pool empty!\n");
237 }
238
239 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
240 return cmd;
241 }
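/*
 * Typical usage sketch (illustrative only): callers must handle a NULL
 * return and hand the command back with megasas_return_cmd() once done:
 *
 *	cmd = megasas_get_cmd(instance);
 *	if (!cmd)
 *		return -ENOMEM;
 *	...build and issue the MFI frame...
 *	megasas_return_cmd(instance, cmd);
 */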
242
243 /**
244 * megasas_return_cmd - Return a cmd to free command pool
245 * @instance: Adapter soft state
246 * @cmd: Command packet to be returned to free command pool
247 */
248 void
249 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
250 {
251 unsigned long flags;
252 u32 blk_tags;
253 struct megasas_cmd_fusion *cmd_fusion;
254 struct fusion_context *fusion = instance->ctrl_context;
255
256 /* This flag is used only for fusion adapter.
257 * Wait for Interrupt for Polled mode DCMD
258 */
259 if (cmd->flags & DRV_DCMD_POLLED_MODE)
260 return;
261
262 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
263
264 if (fusion) {
265 blk_tags = instance->max_scsi_cmds + cmd->index;
266 cmd_fusion = fusion->cmd_list[blk_tags];
267 megasas_return_cmd_fusion(instance, cmd_fusion);
268 }
269 cmd->scmd = NULL;
270 cmd->frame_count = 0;
271 cmd->flags = 0;
272 memset(cmd->frame, 0, instance->mfi_frame_size);
273 cmd->frame->io.context = cpu_to_le32(cmd->index);
274 if (!fusion && reset_devices)
275 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
276 list_add(&cmd->list, (&instance->cmd_pool)->next);
277
278 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
279
280 }
281
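/*
 * Note: format_timestamp() and format_class() below may return pointers to
 * static buffers, so a result is only valid until the next call and the
 * helpers are not re-entrant.
 */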
282 static const char *
283 format_timestamp(uint32_t timestamp)
284 {
285 static char buffer[32];
286
287 if ((timestamp & 0xff000000) == 0xff000000)
288 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
289 0x00ffffff);
290 else
291 snprintf(buffer, sizeof(buffer), "%us", timestamp);
292 return buffer;
293 }
294
295 static const char *
296 format_class(int8_t class)
297 {
298 static char buffer[6];
299
300 switch (class) {
301 case MFI_EVT_CLASS_DEBUG:
302 return "debug";
303 case MFI_EVT_CLASS_PROGRESS:
304 return "progress";
305 case MFI_EVT_CLASS_INFO:
306 return "info";
307 case MFI_EVT_CLASS_WARNING:
308 return "WARN";
309 case MFI_EVT_CLASS_CRITICAL:
310 return "CRIT";
311 case MFI_EVT_CLASS_FATAL:
312 return "FATAL";
313 case MFI_EVT_CLASS_DEAD:
314 return "DEAD";
315 default:
316 snprintf(buffer, sizeof(buffer), "%d", class);
317 return buffer;
318 }
319 }
320
321 /**
322 * megasas_decode_evt: Decode FW AEN event and print critical event
323 * for information.
324 * @instance: Adapter soft state
325 */
326 static void
327 megasas_decode_evt(struct megasas_instance *instance)
328 {
329 struct megasas_evt_detail *evt_detail = instance->evt_detail;
330 union megasas_evt_class_locale class_locale;
331 class_locale.word = le32_to_cpu(evt_detail->cl.word);
332
333 if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
334 dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
335 le32_to_cpu(evt_detail->seq_num),
336 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
337 (class_locale.members.locale),
338 format_class(class_locale.members.class),
339 evt_detail->description);
340 }
341
342 /**
343 * The following functions are defined for xscale
344 * (deviceid : 1064R, PERC5) controllers
345 */
346
347 /**
348 * megasas_enable_intr_xscale - Enables interrupts
349 * @regs: MFI register set
350 */
351 static inline void
352 megasas_enable_intr_xscale(struct megasas_instance *instance)
353 {
354 struct megasas_register_set __iomem *regs;
355
356 regs = instance->reg_set;
357 writel(0, &(regs)->outbound_intr_mask);
358
359 /* Dummy readl to force pci flush */
360 readl(&regs->outbound_intr_mask);
361 }
362
363 /**
364 * megasas_disable_intr_xscale -Disables interrupt
365 * @regs: MFI register set
366 */
367 static inline void
368 megasas_disable_intr_xscale(struct megasas_instance *instance)
369 {
370 struct megasas_register_set __iomem *regs;
371 u32 mask = 0x1f;
372
373 regs = instance->reg_set;
374 writel(mask, &regs->outbound_intr_mask);
375 /* Dummy readl to force pci flush */
376 readl(&regs->outbound_intr_mask);
377 }
378
379 /**
380 * megasas_read_fw_status_reg_xscale - returns the current FW status value
381 * @regs: MFI register set
382 */
383 static u32
384 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem *regs)
385 {
386 return readl(&(regs)->outbound_msg_0);
387 }
388 /**
389 * megasas_clear_intr_xscale - Check & clear interrupt
390 * @regs: MFI register set
391 */
392 static int
393 megasas_clear_intr_xscale(struct megasas_register_set __iomem *regs)
394 {
395 u32 status;
396 u32 mfiStatus = 0;
397
398 /*
399 * Check if it is our interrupt
400 */
401 status = readl(&regs->outbound_intr_status);
402
403 if (status & MFI_OB_INTR_STATUS_MASK)
404 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
405 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
406 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
407
408 /*
409 * Clear the interrupt by writing back the same value
410 */
411 if (mfiStatus)
412 writel(status, &regs->outbound_intr_status);
413
414 /* Dummy readl to force pci flush */
415 readl(&regs->outbound_intr_status);
416
417 return mfiStatus;
418 }
419
420 /**
421 * megasas_fire_cmd_xscale - Sends command to the FW
422 * @frame_phys_addr : Physical address of cmd
423 * @frame_count : Number of frames for the command
424 * @regs : MFI register set
425 */
426 static inline void
427 megasas_fire_cmd_xscale(struct megasas_instance *instance,
428 dma_addr_t frame_phys_addr,
429 u32 frame_count,
430 struct megasas_register_set __iomem *regs)
431 {
432 unsigned long flags;
433
434 spin_lock_irqsave(&instance->hba_lock, flags);
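	/*
	 * The xscale inbound queue port takes the frame physical address
	 * shifted right by 3, with the frame count OR'd into the low bits.
	 */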
435 writel((frame_phys_addr >> 3)|(frame_count),
436 &(regs)->inbound_queue_port);
437 spin_unlock_irqrestore(&instance->hba_lock, flags);
438 }
439
440 /**
441 * megasas_adp_reset_xscale - For controller reset
442 * @regs: MFI register set
443 */
444 static int
445 megasas_adp_reset_xscale(struct megasas_instance *instance,
446 struct megasas_register_set __iomem *regs)
447 {
448 u32 i;
449 u32 pcidata;
450
451 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
452
453 for (i = 0; i < 3; i++)
454 msleep(1000); /* sleep for 3 secs */
455 pcidata = 0;
456 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
457 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
458 if (pcidata & 0x2) {
459 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
460 pcidata &= ~0x2;
461 pci_write_config_dword(instance->pdev,
462 MFI_1068_PCSR_OFFSET, pcidata);
463
464 for (i = 0; i < 2; i++)
465 msleep(1000); /* need to wait 2 secs again */
466
467 pcidata = 0;
468 pci_read_config_dword(instance->pdev,
469 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
470 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
471 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
472 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
473 pcidata = 0;
474 pci_write_config_dword(instance->pdev,
475 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
476 }
477 }
478 return 0;
479 }
480
481 /**
482 * megasas_check_reset_xscale - For controller reset check
483 * @regs: MFI register set
484 */
485 static int
486 megasas_check_reset_xscale(struct megasas_instance *instance,
487 struct megasas_register_set __iomem *regs)
488 {
489 if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
490 (le32_to_cpu(*instance->consumer) ==
491 MEGASAS_ADPRESET_INPROG_SIGN))
492 return 1;
493 return 0;
494 }
495
496 static struct megasas_instance_template megasas_instance_template_xscale = {
497
498 .fire_cmd = megasas_fire_cmd_xscale,
499 .enable_intr = megasas_enable_intr_xscale,
500 .disable_intr = megasas_disable_intr_xscale,
501 .clear_intr = megasas_clear_intr_xscale,
502 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
503 .adp_reset = megasas_adp_reset_xscale,
504 .check_reset = megasas_check_reset_xscale,
505 .service_isr = megasas_isr,
506 .tasklet = megasas_complete_cmd_dpc,
507 .init_adapter = megasas_init_adapter_mfi,
508 .build_and_issue_cmd = megasas_build_and_issue_cmd,
509 .issue_dcmd = megasas_issue_dcmd,
510 };
511
512 /**
513 * This is the end of set of functions & definitions specific
514 * to xscale (deviceid : 1064R, PERC5) controllers
515 */
516
517 /**
518 * The following functions are defined for ppc (deviceid : 0x60)
519 * controllers
520 */
521
522 /**
523 * megasas_enable_intr_ppc - Enables interrupts
524 * @regs: MFI register set
525 */
526 static inline void
527 megasas_enable_intr_ppc(struct megasas_instance *instance)
528 {
529 struct megasas_register_set __iomem *regs;
530
531 regs = instance->reg_set;
532 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
533
534 writel(~0x80000000, &(regs)->outbound_intr_mask);
535
536 /* Dummy readl to force pci flush */
537 readl(&regs->outbound_intr_mask);
538 }
539
540 /**
541 * megasas_disable_intr_ppc - Disable interrupt
542 * @regs: MFI register set
543 */
544 static inline void
545 megasas_disable_intr_ppc(struct megasas_instance *instance)
546 {
547 struct megasas_register_set __iomem *regs;
548 u32 mask = 0xFFFFFFFF;
549
550 regs = instance->reg_set;
551 writel(mask, &regs->outbound_intr_mask);
552 /* Dummy readl to force pci flush */
553 readl(&regs->outbound_intr_mask);
554 }
555
556 /**
557 * megasas_read_fw_status_reg_ppc - returns the current FW status value
558 * @regs: MFI register set
559 */
560 static u32
561 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem *regs)
562 {
563 return readl(&(regs)->outbound_scratch_pad);
564 }
565
566 /**
567 * megasas_clear_intr_ppc - Check & clear interrupt
568 * @regs: MFI register set
569 */
570 static int
571 megasas_clear_intr_ppc(struct megasas_register_set __iomem *regs)
572 {
573 u32 status, mfiStatus = 0;
574
575 /*
576 * Check if it is our interrupt
577 */
578 status = readl(&regs->outbound_intr_status);
579
580 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
581 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
582
583 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
584 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
585
586 /*
587 * Clear the interrupt by writing back the same value
588 */
589 writel(status, &regs->outbound_doorbell_clear);
590
591 /* Dummy readl to force pci flush */
592 readl(&regs->outbound_doorbell_clear);
593
594 return mfiStatus;
595 }
596
597 /**
598 * megasas_fire_cmd_ppc - Sends command to the FW
599 * @frame_phys_addr : Physical address of cmd
600 * @frame_count : Number of frames for the command
601 * @regs : MFI register set
602 */
603 static inline void
604 megasas_fire_cmd_ppc(struct megasas_instance *instance,
605 dma_addr_t frame_phys_addr,
606 u32 frame_count,
607 struct megasas_register_set __iomem *regs)
608 {
609 unsigned long flags;
610
611 spin_lock_irqsave(&instance->hba_lock, flags);
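	/*
	 * The frame count is packed into the low address bits (count << 1)
	 * and bit 0 is set to post the command; megasas_fire_cmd_gen2()
	 * uses the same encoding.
	 */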
612 writel((frame_phys_addr | (frame_count<<1))|1,
613 &(regs)->inbound_queue_port);
614 spin_unlock_irqrestore(&instance->hba_lock, flags);
615 }
616
617 /**
618 * megasas_check_reset_ppc - For controller reset check
619 * @regs: MFI register set
620 */
621 static int
622 megasas_check_reset_ppc(struct megasas_instance *instance,
623 struct megasas_register_set __iomem *regs)
624 {
625 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
626 return 1;
627
628 return 0;
629 }
630
631 static struct megasas_instance_template megasas_instance_template_ppc = {
632
633 .fire_cmd = megasas_fire_cmd_ppc,
634 .enable_intr = megasas_enable_intr_ppc,
635 .disable_intr = megasas_disable_intr_ppc,
636 .clear_intr = megasas_clear_intr_ppc,
637 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
638 .adp_reset = megasas_adp_reset_xscale,
639 .check_reset = megasas_check_reset_ppc,
640 .service_isr = megasas_isr,
641 .tasklet = megasas_complete_cmd_dpc,
642 .init_adapter = megasas_init_adapter_mfi,
643 .build_and_issue_cmd = megasas_build_and_issue_cmd,
644 .issue_dcmd = megasas_issue_dcmd,
645 };
646
647 /**
648 * megasas_enable_intr_skinny - Enables interrupts
649 * @regs: MFI register set
650 */
651 static inline void
652 megasas_enable_intr_skinny(struct megasas_instance *instance)
653 {
654 struct megasas_register_set __iomem *regs;
655
656 regs = instance->reg_set;
657 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
658
659 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
660
661 /* Dummy readl to force pci flush */
662 readl(&regs->outbound_intr_mask);
663 }
664
665 /**
666 * megasas_disable_intr_skinny - Disables interrupt
667 * @regs: MFI register set
668 */
669 static inline void
670 megasas_disable_intr_skinny(struct megasas_instance *instance)
671 {
672 struct megasas_register_set __iomem *regs;
673 u32 mask = 0xFFFFFFFF;
674
675 regs = instance->reg_set;
676 writel(mask, &regs->outbound_intr_mask);
677 /* Dummy readl to force pci flush */
678 readl(&regs->outbound_intr_mask);
679 }
680
681 /**
682 * megasas_read_fw_status_reg_skinny - returns the current FW status value
683 * @regs: MFI register set
684 */
685 static u32
686 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
687 {
688 return readl(&(regs)->outbound_scratch_pad);
689 }
690
691 /**
692 * megasas_clear_intr_skinny - Check & clear interrupt
693 * @regs: MFI register set
694 */
695 static int
696 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
697 {
698 u32 status;
699 u32 mfiStatus = 0;
700
701 /*
702 * Check if it is our interrupt
703 */
704 status = readl(&regs->outbound_intr_status);
705
706 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
707 return 0;
708 }
709
710 /*
711 * Check if it is our interrupt
712 */
713 if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
714 MFI_STATE_FAULT) {
715 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
716 } else
717 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
718
719 /*
720 * Clear the interrupt by writing back the same value
721 */
722 writel(status, &regs->outbound_intr_status);
723
724 /*
725 * dummy read to flush PCI
726 */
727 readl(&regs->outbound_intr_status);
728
729 return mfiStatus;
730 }
731
732 /**
733 * megasas_fire_cmd_skinny - Sends command to the FW
734 * @frame_phys_addr : Physical address of cmd
735 * @frame_count : Number of frames for the command
736 * @regs : MFI register set
737 */
738 static inline void
739 megasas_fire_cmd_skinny(struct megasas_instance *instance,
740 dma_addr_t frame_phys_addr,
741 u32 frame_count,
742 struct megasas_register_set __iomem *regs)
743 {
744 unsigned long flags;
745
746 spin_lock_irqsave(&instance->hba_lock, flags);
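	/*
	 * Skinny controllers take a 64-bit frame address split across the
	 * high and low inbound queue ports; the frame count is packed into
	 * the low bits of the lower half, with bit 0 set to post the command.
	 */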
747 writel(upper_32_bits(frame_phys_addr),
748 &(regs)->inbound_high_queue_port);
749 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
750 &(regs)->inbound_low_queue_port);
751 mmiowb();
752 spin_unlock_irqrestore(&instance->hba_lock, flags);
753 }
754
755 /**
756 * megasas_check_reset_skinny - For controller reset check
757 * @regs: MFI register set
758 */
759 static int
760 megasas_check_reset_skinny(struct megasas_instance *instance,
761 struct megasas_register_set __iomem *regs)
762 {
763 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
764 return 1;
765
766 return 0;
767 }
768
769 static struct megasas_instance_template megasas_instance_template_skinny = {
770
771 .fire_cmd = megasas_fire_cmd_skinny,
772 .enable_intr = megasas_enable_intr_skinny,
773 .disable_intr = megasas_disable_intr_skinny,
774 .clear_intr = megasas_clear_intr_skinny,
775 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
776 .adp_reset = megasas_adp_reset_gen2,
777 .check_reset = megasas_check_reset_skinny,
778 .service_isr = megasas_isr,
779 .tasklet = megasas_complete_cmd_dpc,
780 .init_adapter = megasas_init_adapter_mfi,
781 .build_and_issue_cmd = megasas_build_and_issue_cmd,
782 .issue_dcmd = megasas_issue_dcmd,
783 };
784
785
786 /**
787 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
788 * controllers
789 */
790
791 /**
792 * megasas_enable_intr_gen2 - Enables interrupts
793 * @regs: MFI register set
794 */
795 static inline void
796 megasas_enable_intr_gen2(struct megasas_instance *instance)
797 {
798 struct megasas_register_set __iomem *regs;
799
800 regs = instance->reg_set;
801 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
802
803 /* write ~0x00000005 (bits 2 and 0) to the intr mask */
804 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
805
806 /* Dummy readl to force pci flush */
807 readl(&regs->outbound_intr_mask);
808 }
809
810 /**
811 * megasas_disable_intr_gen2 - Disables interrupt
812 * @regs: MFI register set
813 */
814 static inline void
815 megasas_disable_intr_gen2(struct megasas_instance *instance)
816 {
817 struct megasas_register_set __iomem *regs;
818 u32 mask = 0xFFFFFFFF;
819
820 regs = instance->reg_set;
821 writel(mask, &regs->outbound_intr_mask);
822 /* Dummy readl to force pci flush */
823 readl(&regs->outbound_intr_mask);
824 }
825
826 /**
827 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
828 * @regs: MFI register set
829 */
830 static u32
831 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
832 {
833 return readl(&(regs)->outbound_scratch_pad);
834 }
835
836 /**
837 * megasas_clear_intr_gen2 - Check & clear interrupt
838 * @regs: MFI register set
839 */
840 static int
841 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
842 {
843 u32 status;
844 u32 mfiStatus = 0;
845
846 /*
847 * Check if it is our interrupt
848 */
849 status = readl(&regs->outbound_intr_status);
850
851 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
852 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
853 }
854 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
855 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
856 }
857
858 /*
859 * Clear the interrupt by writing back the same value
860 */
861 if (mfiStatus)
862 writel(status, &regs->outbound_doorbell_clear);
863
864 /* Dummy readl to force pci flush */
865 readl(&regs->outbound_intr_status);
866
867 return mfiStatus;
868 }
869 /**
870 * megasas_fire_cmd_gen2 - Sends command to the FW
871 * @frame_phys_addr : Physical address of cmd
872 * @frame_count : Number of frames for the command
873 * @regs : MFI register set
874 */
875 static inline void
876 megasas_fire_cmd_gen2(struct megasas_instance *instance,
877 dma_addr_t frame_phys_addr,
878 u32 frame_count,
879 struct megasas_register_set __iomem *regs)
880 {
881 unsigned long flags;
882
883 spin_lock_irqsave(&instance->hba_lock, flags);
884 writel((frame_phys_addr | (frame_count<<1))|1,
885 &(regs)->inbound_queue_port);
886 spin_unlock_irqrestore(&instance->hba_lock, flags);
887 }
888
889 /**
890 * megasas_adp_reset_gen2 - For controller reset
891 * @regs: MFI register set
892 */
893 static int
894 megasas_adp_reset_gen2(struct megasas_instance *instance,
895 struct megasas_register_set __iomem *reg_set)
896 {
897 u32 retry = 0 ;
898 u32 HostDiag;
899 u32 __iomem *seq_offset = &reg_set->seq_offset;
900 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
901
902 if (instance->instancet == &megasas_instance_template_skinny) {
903 seq_offset = &reg_set->fusion_seq_offset;
904 hostdiag_offset = &reg_set->fusion_host_diag;
905 }
906
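	/* Write the diagnostic unlock key sequence to enable host diag writes. */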
907 writel(0, seq_offset);
908 writel(4, seq_offset);
909 writel(0xb, seq_offset);
910 writel(2, seq_offset);
911 writel(7, seq_offset);
912 writel(0xd, seq_offset);
913
914 msleep(1000);
915
916 HostDiag = (u32)readl(hostdiag_offset);
917
918 while (!(HostDiag & DIAG_WRITE_ENABLE)) {
919 msleep(100);
920 HostDiag = (u32)readl(hostdiag_offset);
921 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
922 retry, HostDiag);
923
924 if (retry++ >= 100)
925 return 1;
926
927 }
928
929 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
930
931 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
932
933 ssleep(10);
934
935 HostDiag = (u32)readl(hostdiag_offset);
936 while (HostDiag & DIAG_RESET_ADAPTER) {
937 msleep(100);
938 HostDiag = (u32)readl(hostdiag_offset);
939 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
940 retry, HostDiag);
941
942 if (retry++ >= 1000)
943 return 1;
944
945 }
946 return 0;
947 }
948
949 /**
950 * megasas_check_reset_gen2 - For controller reset check
951 * @regs: MFI register set
952 */
953 static int
954 megasas_check_reset_gen2(struct megasas_instance *instance,
955 struct megasas_register_set __iomem *regs)
956 {
957 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
958 return 1;
959
960 return 0;
961 }
962
963 static struct megasas_instance_template megasas_instance_template_gen2 = {
964
965 .fire_cmd = megasas_fire_cmd_gen2,
966 .enable_intr = megasas_enable_intr_gen2,
967 .disable_intr = megasas_disable_intr_gen2,
968 .clear_intr = megasas_clear_intr_gen2,
969 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
970 .adp_reset = megasas_adp_reset_gen2,
971 .check_reset = megasas_check_reset_gen2,
972 .service_isr = megasas_isr,
973 .tasklet = megasas_complete_cmd_dpc,
974 .init_adapter = megasas_init_adapter_mfi,
975 .build_and_issue_cmd = megasas_build_and_issue_cmd,
976 .issue_dcmd = megasas_issue_dcmd,
977 };
978
979 /**
980 * This is the end of set of functions & definitions
981 * specific to gen2 (deviceid : 0x78, 0x79) controllers
982 */
983
984 /*
985 * Template added for TB (Fusion)
986 */
987 extern struct megasas_instance_template megasas_instance_template_fusion;
988
989 /**
990 * megasas_issue_polled - Issues a polling command
991 * @instance: Adapter soft state
992 * @cmd: Command packet to be issued
993 *
994 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
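 * Returns DCMD_NOT_FIRED if the adapter is in a critical error state,
 * otherwise the result of wait_and_poll().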
995 */
996 int
997 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
998 {
999 struct megasas_header *frame_hdr = &cmd->frame->hdr;
1000
1001 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1002 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1003
1004 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1005 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1006 __func__, __LINE__);
1007 return DCMD_NOT_FIRED;
1008 }
1009
1010 instance->instancet->issue_dcmd(instance, cmd);
1011
1012 return wait_and_poll(instance, cmd, instance->requestorId ?
1013 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1014 }
1015
1016 /**
1017 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
1018 * @instance: Adapter soft state
1019 * @cmd: Command to be issued
1020 * @timeout: Timeout in seconds
1021 *
1022 * This function waits on an event for the command to be returned from ISR.
1023 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1024 * Used to issue ioctl commands.
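 * Returns DCMD_SUCCESS, DCMD_FAILED, DCMD_TIMEOUT or DCMD_NOT_FIRED.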
1025 */
1026 int
1027 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1028 struct megasas_cmd *cmd, int timeout)
1029 {
1030 int ret = 0;
1031 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1032
1033 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1034 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1035 __func__, __LINE__);
1036 return DCMD_NOT_FIRED;
1037 }
1038
1039 instance->instancet->issue_dcmd(instance, cmd);
1040
1041 if (timeout) {
1042 ret = wait_event_timeout(instance->int_cmd_wait_q,
1043 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1044 if (!ret) {
1045 dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1046 __func__, __LINE__);
1047 return DCMD_TIMEOUT;
1048 }
1049 } else
1050 wait_event(instance->int_cmd_wait_q,
1051 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1052
1053 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1054 DCMD_SUCCESS : DCMD_FAILED;
1055 }
1056
1057 /**
1058 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
1059 * @instance: Adapter soft state
1060 * @cmd_to_abort: Previously issued cmd to be aborted
1061 * @timeout: Timeout in seconds
1062 *
1063 * MFI firmware can abort a previously issued AEN command (automatic event
1064 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1065 * cmd and waits for return status.
1066 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1067 */
1068 static int
1069 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1070 struct megasas_cmd *cmd_to_abort, int timeout)
1071 {
1072 struct megasas_cmd *cmd;
1073 struct megasas_abort_frame *abort_fr;
1074 int ret = 0;
1075
1076 cmd = megasas_get_cmd(instance);
1077
1078 if (!cmd)
1079 return -1;
1080
1081 abort_fr = &cmd->frame->abort;
1082
1083 /*
1084 * Prepare and issue the abort frame
1085 */
1086 abort_fr->cmd = MFI_CMD_ABORT;
1087 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1088 abort_fr->flags = cpu_to_le16(0);
1089 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1090 abort_fr->abort_mfi_phys_addr_lo =
1091 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1092 abort_fr->abort_mfi_phys_addr_hi =
1093 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1094
1095 cmd->sync_cmd = 1;
1096 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1097
1098 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1099 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1100 __func__, __LINE__);
1101 return DCMD_NOT_FIRED;
1102 }
1103
1104 instance->instancet->issue_dcmd(instance, cmd);
1105
1106 if (timeout) {
1107 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1108 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1109 if (!ret) {
1110 dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1111 __func__, __LINE__);
1112 return DCMD_TIMEOUT;
1113 }
1114 } else
1115 wait_event(instance->abort_cmd_wait_q,
1116 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1117
1118 cmd->sync_cmd = 0;
1119
1120 megasas_return_cmd(instance, cmd);
1121 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1122 DCMD_SUCCESS : DCMD_FAILED;
1123 }
1124
1125 /**
1126 * megasas_make_sgl32 - Prepares 32-bit SGL
1127 * @instance: Adapter soft state
1128 * @scp: SCSI command from the mid-layer
1129 * @mfi_sgl: SGL to be filled in
1130 *
1131 * If successful, this function returns the number of SG elements. Otherwise,
1132 * it returns -1.
1133 */
1134 static int
1135 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1136 union megasas_sgl *mfi_sgl)
1137 {
1138 int i;
1139 int sge_count;
1140 struct scatterlist *os_sgl;
1141
1142 sge_count = scsi_dma_map(scp);
1143 BUG_ON(sge_count < 0);
1144
1145 if (sge_count) {
1146 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1147 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1148 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1149 }
1150 }
1151 return sge_count;
1152 }
1153
1154 /**
1155 * megasas_make_sgl64 - Prepares 64-bit SGL
1156 * @instance: Adapter soft state
1157 * @scp: SCSI command from the mid-layer
1158 * @mfi_sgl: SGL to be filled in
1159 *
1160 * If successful, this function returns the number of SG elements. Otherwise,
1161 * it returns -1.
1162 */
1163 static int
1164 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1165 union megasas_sgl *mfi_sgl)
1166 {
1167 int i;
1168 int sge_count;
1169 struct scatterlist *os_sgl;
1170
1171 sge_count = scsi_dma_map(scp);
1172 BUG_ON(sge_count < 0);
1173
1174 if (sge_count) {
1175 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1176 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1177 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1178 }
1179 }
1180 return sge_count;
1181 }
1182
1183 /**
1184 * megasas_make_sgl_skinny - Prepares IEEE SGL
1185 * @instance: Adapter soft state
1186 * @scp: SCSI command from the mid-layer
1187 * @mfi_sgl: SGL to be filled in
1188 *
1189 * If successful, this function returns the number of SG elements. Otherwise,
1190 * it returns -1.
1191 */
1192 static int
1193 megasas_make_sgl_skinny(struct megasas_instance *instance,
1194 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1195 {
1196 int i;
1197 int sge_count;
1198 struct scatterlist *os_sgl;
1199
1200 sge_count = scsi_dma_map(scp);
1201
1202 if (sge_count) {
1203 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1204 mfi_sgl->sge_skinny[i].length =
1205 cpu_to_le32(sg_dma_len(os_sgl));
1206 mfi_sgl->sge_skinny[i].phys_addr =
1207 cpu_to_le64(sg_dma_address(os_sgl));
1208 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1209 }
1210 }
1211 return sge_count;
1212 }
1213
1214 /**
1215 * megasas_get_frame_count - Computes the number of frames
1216 * @frame_type : type of frame- io or pthru frame
1217 * @sge_count : number of sg elements
1218 *
1219 * Returns the number of frames required for the given number of SG elements (sge_count)
1220 */
1221
1222 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1223 u8 sge_count, u8 frame_type)
1224 {
1225 int num_cnt;
1226 int sge_bytes;
1227 u32 sge_sz;
1228 u32 frame_count = 0;
1229
1230 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1231 sizeof(struct megasas_sge32);
1232
1233 if (instance->flag_ieee) {
1234 sge_sz = sizeof(struct megasas_sge_skinny);
1235 }
1236
1237 /*
1238 * Main frame can contain 2 SGEs for 64-bit SGLs and
1239 * 3 SGEs for 32-bit SGLs for ldio &
1240 * 1 SGEs for 64-bit SGLs and
1241 * 2 SGEs for 32-bit SGLs for pthru frame
1242 */
1243 if (unlikely(frame_type == PTHRU_FRAME)) {
1244 if (instance->flag_ieee == 1) {
1245 num_cnt = sge_count - 1;
1246 } else if (IS_DMA64)
1247 num_cnt = sge_count - 1;
1248 else
1249 num_cnt = sge_count - 2;
1250 } else {
1251 if (instance->flag_ieee == 1) {
1252 num_cnt = sge_count - 1;
1253 } else if (IS_DMA64)
1254 num_cnt = sge_count - 2;
1255 else
1256 num_cnt = sge_count - 3;
1257 }
1258
1259 if (num_cnt > 0) {
1260 sge_bytes = sge_sz * num_cnt;
1261
1262 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1263 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1264 }
1265 /* Main frame */
1266 frame_count += 1;
1267
1268 if (frame_count > 7)
1269 frame_count = 8;
1270 return frame_count;
1271 }
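/*
 * Worked example (illustrative, assuming a 12-byte 64-bit SGE and the
 * 64-byte MEGAMFI_FRAME_SIZE): a 64-bit LDIO with 10 SGEs keeps 2 SGEs in
 * the main frame, leaving 8 * 12 = 96 bytes of extra SGL, i.e. 2 extra
 * frames, for a total frame_count of 3.
 */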
1272
1273 /**
1274 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
1275 * @instance: Adapter soft state
1276 * @scp: SCSI command
1277 * @cmd: Command to be prepared in
1278 *
1279 * This function prepares CDB commands. These are typically pass-through
1280 * commands to the devices.
1281 */
1282 static int
1283 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1284 struct megasas_cmd *cmd)
1285 {
1286 u32 is_logical;
1287 u32 device_id;
1288 u16 flags = 0;
1289 struct megasas_pthru_frame *pthru;
1290
1291 is_logical = MEGASAS_IS_LOGICAL(scp->device);
1292 device_id = MEGASAS_DEV_INDEX(scp);
1293 pthru = (struct megasas_pthru_frame *)cmd->frame;
1294
1295 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1296 flags = MFI_FRAME_DIR_WRITE;
1297 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1298 flags = MFI_FRAME_DIR_READ;
1299 else if (scp->sc_data_direction == PCI_DMA_NONE)
1300 flags = MFI_FRAME_DIR_NONE;
1301
1302 if (instance->flag_ieee == 1) {
1303 flags |= MFI_FRAME_IEEE;
1304 }
1305
1306 /*
1307 * Prepare the DCDB frame
1308 */
1309 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1310 pthru->cmd_status = 0x0;
1311 pthru->scsi_status = 0x0;
1312 pthru->target_id = device_id;
1313 pthru->lun = scp->device->lun;
1314 pthru->cdb_len = scp->cmd_len;
1315 pthru->timeout = 0;
1316 pthru->pad_0 = 0;
1317 pthru->flags = cpu_to_le16(flags);
1318 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1319
1320 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1321
1322 /*
1323 * If the command is for the tape device, set the
1324 * pthru timeout to the os layer timeout value.
1325 */
1326 if (scp->device->type == TYPE_TAPE) {
1327 if ((scp->request->timeout / HZ) > 0xFFFF)
1328 pthru->timeout = cpu_to_le16(0xFFFF);
1329 else
1330 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1331 }
1332
1333 /*
1334 * Construct SGL
1335 */
1336 if (instance->flag_ieee == 1) {
1337 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1338 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1339 &pthru->sgl);
1340 } else if (IS_DMA64) {
1341 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1342 pthru->sge_count = megasas_make_sgl64(instance, scp,
1343 &pthru->sgl);
1344 } else
1345 pthru->sge_count = megasas_make_sgl32(instance, scp,
1346 &pthru->sgl);
1347
1348 if (pthru->sge_count > instance->max_num_sge) {
1349 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1350 pthru->sge_count);
1351 return 0;
1352 }
1353
1354 /*
1355 * Sense info specific
1356 */
1357 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1358 pthru->sense_buf_phys_addr_hi =
1359 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1360 pthru->sense_buf_phys_addr_lo =
1361 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1362
1363 /*
1364 * Compute the total number of frames this command consumes. FW uses
1365 * this number to pull sufficient number of frames from host memory.
1366 */
1367 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1368 PTHRU_FRAME);
1369
1370 return cmd->frame_count;
1371 }
1372
1373 /**
1374 * megasas_build_ldio - Prepares IOs to logical devices
1375 * @instance: Adapter soft state
1376 * @scp: SCSI command
1377 * @cmd: Command to be prepared
1378 *
1379 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1380 */
1381 static int
1382 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1383 struct megasas_cmd *cmd)
1384 {
1385 u32 device_id;
1386 u8 sc = scp->cmnd[0];
1387 u16 flags = 0;
1388 struct megasas_io_frame *ldio;
1389
1390 device_id = MEGASAS_DEV_INDEX(scp);
1391 ldio = (struct megasas_io_frame *)cmd->frame;
1392
1393 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1394 flags = MFI_FRAME_DIR_WRITE;
1395 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1396 flags = MFI_FRAME_DIR_READ;
1397
1398 if (instance->flag_ieee == 1) {
1399 flags |= MFI_FRAME_IEEE;
1400 }
1401
1402 /*
1403 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1404 */
1405 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1406 ldio->cmd_status = 0x0;
1407 ldio->scsi_status = 0x0;
1408 ldio->target_id = device_id;
1409 ldio->timeout = 0;
1410 ldio->reserved_0 = 0;
1411 ldio->pad_0 = 0;
1412 ldio->flags = cpu_to_le16(flags);
1413 ldio->start_lba_hi = 0;
1414 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1415
1416 /*
1417 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1418 */
1419 if (scp->cmd_len == 6) {
1420 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1421 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1422 ((u32) scp->cmnd[2] << 8) |
1423 (u32) scp->cmnd[3]);
1424
1425 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1426 }
1427
1428 /*
1429 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1430 */
1431 else if (scp->cmd_len == 10) {
1432 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1433 ((u32) scp->cmnd[7] << 8));
1434 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1435 ((u32) scp->cmnd[3] << 16) |
1436 ((u32) scp->cmnd[4] << 8) |
1437 (u32) scp->cmnd[5]);
1438 }
1439
1440 /*
1441 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1442 */
1443 else if (scp->cmd_len == 12) {
1444 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1445 ((u32) scp->cmnd[7] << 16) |
1446 ((u32) scp->cmnd[8] << 8) |
1447 (u32) scp->cmnd[9]);
1448
1449 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1450 ((u32) scp->cmnd[3] << 16) |
1451 ((u32) scp->cmnd[4] << 8) |
1452 (u32) scp->cmnd[5]);
1453 }
1454
1455 /*
1456 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1457 */
1458 else if (scp->cmd_len == 16) {
1459 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1460 ((u32) scp->cmnd[11] << 16) |
1461 ((u32) scp->cmnd[12] << 8) |
1462 (u32) scp->cmnd[13]);
1463
1464 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1465 ((u32) scp->cmnd[7] << 16) |
1466 ((u32) scp->cmnd[8] << 8) |
1467 (u32) scp->cmnd[9]);
1468
1469 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1470 ((u32) scp->cmnd[3] << 16) |
1471 ((u32) scp->cmnd[4] << 8) |
1472 (u32) scp->cmnd[5]);
1473
1474 }
1475
1476 /*
1477 * Construct SGL
1478 */
1479 if (instance->flag_ieee) {
1480 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1481 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1482 &ldio->sgl);
1483 } else if (IS_DMA64) {
1484 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1485 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1486 } else
1487 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1488
1489 if (ldio->sge_count > instance->max_num_sge) {
1490 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1491 ldio->sge_count);
1492 return 0;
1493 }
1494
1495 /*
1496 * Sense info specific
1497 */
1498 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1499 ldio->sense_buf_phys_addr_hi = 0;
1500 ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1501
1502 /*
1503 * Compute the total number of frames this command consumes. FW uses
1504 * this number to pull sufficient number of frames from host memory.
1505 */
1506 cmd->frame_count = megasas_get_frame_count(instance,
1507 ldio->sge_count, IO_FRAME);
1508
1509 return cmd->frame_count;
1510 }
1511
1512 /**
1513 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
1514 * and whether it's RW or non RW
1515 * @scmd: SCSI command
1516 *
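 * e.g. READ_10 to a logical drive yields READ_WRITE_LDIO, while an
 * INQUIRY to a system PD yields NON_READ_WRITE_SYSPDIO.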
1517 */
1518 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1519 {
1520 int ret;
1521
1522 switch (cmd->cmnd[0]) {
1523 case READ_10:
1524 case WRITE_10:
1525 case READ_12:
1526 case WRITE_12:
1527 case READ_6:
1528 case WRITE_6:
1529 case READ_16:
1530 case WRITE_16:
1531 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1532 READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1533 break;
1534 default:
1535 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1536 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1537 }
1538 return ret;
1539 }
1540
1541 /**
1542 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1543 * in FW
1544 * @instance: Adapter soft state
1545 */
1546 static inline void
1547 megasas_dump_pending_frames(struct megasas_instance *instance)
1548 {
1549 struct megasas_cmd *cmd;
1550 int i,n;
1551 union megasas_sgl *mfi_sgl;
1552 struct megasas_io_frame *ldio;
1553 struct megasas_pthru_frame *pthru;
1554 u32 sgcount;
1555 u16 max_cmd = instance->max_fw_cmds;
1556
1557 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1558 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1559 if (IS_DMA64)
1560 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1561 else
1562 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1563
1564 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1565 for (i = 0; i < max_cmd; i++) {
1566 cmd = instance->cmd_list[i];
1567 if (!cmd->scmd)
1568 continue;
1569 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1570 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1571 ldio = (struct megasas_io_frame *)cmd->frame;
1572 mfi_sgl = &ldio->sgl;
1573 sgcount = ldio->sge_count;
1574 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1575 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1576 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1577 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1578 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1579 } else {
1580 pthru = (struct megasas_pthru_frame *) cmd->frame;
1581 mfi_sgl = &pthru->sgl;
1582 sgcount = pthru->sge_count;
1583 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1584 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1585 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1586 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1587 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1588 }
1589 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1590 for (n = 0; n < sgcount; n++) {
1591 if (IS_DMA64)
1592 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1593 le32_to_cpu(mfi_sgl->sge64[n].length),
1594 le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1595 else
1596 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1597 le32_to_cpu(mfi_sgl->sge32[n].length),
1598 le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1599 }
1600 }
1601 } /*for max_cmd*/
1602 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1603 for (i = 0; i < max_cmd; i++) {
1604
1605 cmd = instance->cmd_list[i];
1606
1607 if (cmd->sync_cmd == 1)
1608 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1609 }
1610 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1611 }
1612
1613 u32
1614 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1615 struct scsi_cmnd *scmd)
1616 {
1617 struct megasas_cmd *cmd;
1618 u32 frame_count;
1619
1620 cmd = megasas_get_cmd(instance);
1621 if (!cmd)
1622 return SCSI_MLQUEUE_HOST_BUSY;
1623
1624 /*
1625 * Logical drive command
1626 */
1627 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1628 frame_count = megasas_build_ldio(instance, scmd, cmd);
1629 else
1630 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1631
1632 if (!frame_count)
1633 goto out_return_cmd;
1634
1635 cmd->scmd = scmd;
1636 scmd->SCp.ptr = (char *)cmd;
1637
1638 /*
1639 * Issue the command to the FW
1640 */
1641 atomic_inc(&instance->fw_outstanding);
1642
1643 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1644 cmd->frame_count-1, instance->reg_set);
1645
1646 return 0;
1647 out_return_cmd:
1648 megasas_return_cmd(instance, cmd);
1649 return SCSI_MLQUEUE_HOST_BUSY;
1650 }
1651
1652
1653 /**
1654 * megasas_queue_command - Queue entry point
1655 * @scmd: SCSI command to be queued
1656 * @done: Callback entry point
1657 */
1658 static int
1659 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1660 {
1661 struct megasas_instance *instance;
1662 struct MR_PRIV_DEVICE *mr_device_priv_data;
1663
1664 instance = (struct megasas_instance *)
1665 scmd->device->host->hostdata;
1666
1667 if (instance->unload == 1) {
1668 scmd->result = DID_NO_CONNECT << 16;
1669 scmd->scsi_done(scmd);
1670 return 0;
1671 }
1672
1673 if (instance->issuepend_done == 0)
1674 return SCSI_MLQUEUE_HOST_BUSY;
1675
1676
1677 /* Check for an mpio path and adjust behavior */
1678 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1679 if (megasas_check_mpio_paths(instance, scmd) ==
1680 (DID_REQUEUE << 16)) {
1681 return SCSI_MLQUEUE_HOST_BUSY;
1682 } else {
1683 scmd->result = DID_NO_CONNECT << 16;
1684 scmd->scsi_done(scmd);
1685 return 0;
1686 }
1687 }
1688
1689 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1690 scmd->result = DID_NO_CONNECT << 16;
1691 scmd->scsi_done(scmd);
1692 return 0;
1693 }
1694
1695 mr_device_priv_data = scmd->device->hostdata;
1696 if (!mr_device_priv_data) {
1697 scmd->result = DID_NO_CONNECT << 16;
1698 scmd->scsi_done(scmd);
1699 return 0;
1700 }
1701
1702 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1703 return SCSI_MLQUEUE_HOST_BUSY;
1704
1705 if (mr_device_priv_data->tm_busy)
1706 return SCSI_MLQUEUE_DEVICE_BUSY;
1707
1708
1709 scmd->result = 0;
1710
1711 if (MEGASAS_IS_LOGICAL(scmd->device) &&
1712 (scmd->device->id >= instance->fw_supported_vd_count ||
1713 scmd->device->lun)) {
1714 scmd->result = DID_BAD_TARGET << 16;
1715 goto out_done;
1716 }
1717
1718 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1719 MEGASAS_IS_LOGICAL(scmd->device) &&
1720 (!instance->fw_sync_cache_support)) {
1721 scmd->result = DID_OK << 16;
1722 goto out_done;
1723 }
1724
1725 return instance->instancet->build_and_issue_cmd(instance, scmd);
1726
1727 out_done:
1728 scmd->scsi_done(scmd);
1729 return 0;
1730 }
1731
1732 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1733 {
1734 int i;
1735
1736 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1737
1738 if ((megasas_mgmt_info.instance[i]) &&
1739 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1740 return megasas_mgmt_info.instance[i];
1741 }
1742
1743 return NULL;
1744 }
1745
1746 /*
1747 * megasas_set_dynamic_target_properties -
1748  * Device properties set by the driver may not be static and need to be
1749  * updated after an OCR.
1750  *
1751  * set tm_capable.
1752  * set dma alignment (only for EEDP-protection-enabled VDs).
1753 *
1754 * @sdev: OS provided scsi device
1755 *
1756 * Returns void
1757 */
1758 void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
1759 {
1760 u16 pd_index = 0, ld;
1761 u32 device_id;
1762 struct megasas_instance *instance;
1763 struct fusion_context *fusion;
1764 struct MR_PRIV_DEVICE *mr_device_priv_data;
1765 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1766 struct MR_LD_RAID *raid;
1767 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1768
1769 instance = megasas_lookup_instance(sdev->host->host_no);
1770 fusion = instance->ctrl_context;
1771 mr_device_priv_data = sdev->hostdata;
1772
1773 if (!fusion || !mr_device_priv_data)
1774 return;
1775
1776 if (MEGASAS_IS_LOGICAL(sdev)) {
1777 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1778 + sdev->id;
1779 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1780 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1781 if (ld >= instance->fw_supported_vd_count)
1782 return;
1783 raid = MR_LdRaidGet(ld, local_map_ptr);
1784
1785 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1786 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1787
1788 mr_device_priv_data->is_tm_capable =
1789 raid->capability.tmCapable;
1790 } else if (instance->use_seqnum_jbod_fp) {
1791 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1792 sdev->id;
1793 pd_sync = (void *)fusion->pd_seq_sync
1794 [(instance->pd_seq_map_id - 1) & 1];
1795 mr_device_priv_data->is_tm_capable =
1796 pd_sync->seq[pd_index].capability.tmCapable;
1797 }
1798 }
1799
1800 /*
1801 * megasas_set_nvme_device_properties -
1802 * set nomerges=2
1803 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1804 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1805 *
1806  * MR firmware provides the value in KB; the caller of this function
1807  * converts KB into bytes.
1808  *
1809  * e.g. MDTS=5 means 2^5 * NVMe page size. With a 4K page size,
1810  * MR firmware provides the value 128, i.e. (32 * 4K) = 128K.
1811 *
1812 * @sdev: scsi device
1813 * @max_io_size: maximum io transfer size
1814 *
1815 */
1816 static inline void
1817 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1818 {
1819 struct megasas_instance *instance;
1820 u32 mr_nvme_pg_size;
1821
1822 instance = (struct megasas_instance *)sdev->host->hostdata;
1823 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1824 MR_DEFAULT_NVME_PAGE_SIZE);
1825
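/* max_io_size is in bytes (the caller converted from KB); the block
 * layer expects this limit in 512-byte sectors.
 */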
1826 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1827
1828 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1829 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1830 }
1831
1832
1833 /*
1834 * megasas_set_static_target_properties -
1835  * Device properties set by the driver are static and do not need to be
1836  * updated after an OCR.
1837 *
1838 * set io timeout
1839 * set device queue depth
1840 * set nvme device properties. see - megasas_set_nvme_device_properties
1841 *
1842 * @sdev: scsi device
1843 * @is_target_prop true, if fw provided target properties.
1844 */
1845 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1846 bool is_target_prop)
1847 {
1848 u16 target_index = 0;
1849 u8 interface_type;
1850 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1851 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1852 u32 tgt_device_qd;
1853 struct megasas_instance *instance;
1854 struct MR_PRIV_DEVICE *mr_device_priv_data;
1855
1856 instance = megasas_lookup_instance(sdev->host->host_no);
1857 mr_device_priv_data = sdev->hostdata;
1858 interface_type = mr_device_priv_data->interface_type;
1859
1860 /*
1861 * The RAID firmware may require extended timeouts.
1862 */
1863 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1864
1865 target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1866
1867 switch (interface_type) {
1868 case SAS_PD:
1869 device_qd = MEGASAS_SAS_QD;
1870 break;
1871 case SATA_PD:
1872 device_qd = MEGASAS_SATA_QD;
1873 break;
1874 case NVME_PD:
1875 device_qd = MEGASAS_NVME_QD;
1876 break;
1877 }
1878
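/* Prefer the queue depth reported in the FW target properties when it is
 * non-zero and does not exceed the host can_queue limit.
 */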
1879 if (is_target_prop) {
1880 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1881 if (tgt_device_qd &&
1882 (tgt_device_qd <= instance->host->can_queue))
1883 device_qd = tgt_device_qd;
1884
1885 /* max_io_size_kb will be set to non zero for
1886 * nvme based vd and syspd.
1887 */
1888 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1889 }
1890
1891 if (instance->nvme_page_size && max_io_size_kb)
1892 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1893
1894 scsi_change_queue_depth(sdev, device_qd);
1895
1896 }
1897
1898
1899 static int megasas_slave_configure(struct scsi_device *sdev)
1900 {
1901 u16 pd_index = 0;
1902 struct megasas_instance *instance;
1903 int ret_target_prop = DCMD_FAILED;
1904 bool is_target_prop = false;
1905
1906 instance = megasas_lookup_instance(sdev->host->host_no);
1907 if (instance->pd_list_not_supported) {
1908 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
1909 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1910 sdev->id;
1911 if (instance->pd_list[pd_index].driveState !=
1912 MR_PD_STATE_SYSTEM)
1913 return -ENXIO;
1914 }
1915 }
1916
1917 mutex_lock(&instance->hba_mutex);
1918 /* Send DCMD to Firmware and cache the information */
1919 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1920 megasas_get_pd_info(instance, sdev);
1921
1922 /* Some Ventura firmware may not have instance->nvme_page_size set.
1923  * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
1924 */
1925 if ((instance->tgt_prop) && (instance->nvme_page_size))
1926 ret_target_prop = megasas_get_target_prop(instance, sdev);
1927
1928 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
1929 megasas_set_static_target_properties(sdev, is_target_prop);
1930
1931 mutex_unlock(&instance->hba_mutex);
1932
1933 /* This sdev property may change post OCR */
1934 megasas_set_dynamic_target_properties(sdev);
1935
1936 return 0;
1937 }
1938
1939 static int megasas_slave_alloc(struct scsi_device *sdev)
1940 {
1941 u16 pd_index = 0;
1942 struct megasas_instance *instance ;
1943 struct MR_PRIV_DEVICE *mr_device_priv_data;
1944
1945 instance = megasas_lookup_instance(sdev->host->host_no);
1946 if (!MEGASAS_IS_LOGICAL(sdev)) {
1947 /*
1948 * Open the OS scan to the SYSTEM PD
1949 */
1950 pd_index =
1951 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1952 sdev->id;
1953 if ((instance->pd_list_not_supported ||
1954 instance->pd_list[pd_index].driveState ==
1955 MR_PD_STATE_SYSTEM)) {
1956 goto scan_target;
1957 }
1958 return -ENXIO;
1959 }
1960
1961 scan_target:
1962 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
1963 GFP_KERNEL);
1964 if (!mr_device_priv_data)
1965 return -ENOMEM;
1966 sdev->hostdata = mr_device_priv_data;
1967
1968 atomic_set(&mr_device_priv_data->r1_ldio_hint,
1969 instance->r1_ldio_hint_default);
1970 return 0;
1971 }
1972
1973 static void megasas_slave_destroy(struct scsi_device *sdev)
1974 {
1975 kfree(sdev->hostdata);
1976 sdev->hostdata = NULL;
1977 }
1978
1979 /*
1980 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
1981 * kill adapter
1982 * @instance: Adapter soft state
1983 *
1984 */
1985 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
1986 {
1987 int i;
1988 struct megasas_cmd *cmd_mfi;
1989 struct megasas_cmd_fusion *cmd_fusion;
1990 struct fusion_context *fusion = instance->ctrl_context;
1991
1992 /* Find all outstanding ioctls */
1993 if (fusion) {
1994 for (i = 0; i < instance->max_fw_cmds; i++) {
1995 cmd_fusion = fusion->cmd_list[i];
1996 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
1997 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
1998 if (cmd_mfi->sync_cmd &&
1999 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2000 cmd_mfi->frame->hdr.cmd_status =
2001 MFI_STAT_WRONG_STATE;
2002 megasas_complete_cmd(instance,
2003 cmd_mfi, DID_OK);
2004 }
2005 }
2006 }
2007 } else {
2008 for (i = 0; i < instance->max_fw_cmds; i++) {
2009 cmd_mfi = instance->cmd_list[i];
2010 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2011 MFI_CMD_ABORT)
2012 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2013 }
2014 }
2015 }
2016
2017
2018 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2019 {
2020 /* Set critical error to block I/O & ioctls in case caller didn't */
2021 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2022 /* Wait 1 second to ensure IO or ioctls in build have posted */
2023 msleep(1000);
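/* Skinny and fusion-class controllers take the stop-adapter request
 * through the doorbell register; older MFI controllers use the inbound
 * doorbell instead.
 */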
2024 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2025 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2026 (instance->adapter_type != MFI_SERIES)) {
2027 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2028 /* Flush */
2029 readl(&instance->reg_set->doorbell);
2030 if (instance->requestorId && instance->peerIsPresent)
2031 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2032 } else {
2033 writel(MFI_STOP_ADP,
2034 &instance->reg_set->inbound_doorbell);
2035 }
2036 /* Complete outstanding ioctls when adapter is killed */
2037 megasas_complete_outstanding_ioctls(instance);
2038 }
2039
2040 /**
2041 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2042 * restored to max value
2043 * @instance: Adapter soft state
2044 *
2045 */
2046 void
2047 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2048 {
2049 unsigned long flags;
2050
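/* Lift throttling only after the FW-busy window has aged at least
 * 5 seconds and outstanding I/O has dropped below the throttle depth.
 */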
2051 if (instance->flag & MEGASAS_FW_BUSY
2052 && time_after(jiffies, instance->last_time + 5 * HZ)
2053 && atomic_read(&instance->fw_outstanding) <
2054 instance->throttlequeuedepth + 1) {
2055
2056 spin_lock_irqsave(instance->host->host_lock, flags);
2057 instance->flag &= ~MEGASAS_FW_BUSY;
2058
2059 instance->host->can_queue = instance->cur_can_queue;
2060 spin_unlock_irqrestore(instance->host->host_lock, flags);
2061 }
2062 }
2063
2064 /**
2065  * megasas_complete_cmd_dpc - Completes commands from the MFI reply queue
2066 * @instance_addr: Address of adapter soft state
2067 *
2068 * Tasklet to complete cmds
2069 */
2070 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2071 {
2072 u32 producer;
2073 u32 consumer;
2074 u32 context;
2075 struct megasas_cmd *cmd;
2076 struct megasas_instance *instance =
2077 (struct megasas_instance *)instance_addr;
2078 unsigned long flags;
2079
2080 /* If we have already declared adapter dead, do not complete cmds */
2081 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2082 return;
2083
2084 spin_lock_irqsave(&instance->completion_lock, flags);
2085
2086 producer = le32_to_cpu(*instance->producer);
2087 consumer = le32_to_cpu(*instance->consumer);
2088
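/* Walk the reply ring from consumer to producer; each entry holds the
 * context (command index) of a completed MFI frame.
 */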
2089 while (consumer != producer) {
2090 context = le32_to_cpu(instance->reply_queue[consumer]);
2091 if (context >= instance->max_fw_cmds) {
2092 dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2093 context);
2094 BUG();
2095 }
2096
2097 cmd = instance->cmd_list[context];
2098
2099 megasas_complete_cmd(instance, cmd, DID_OK);
2100
2101 consumer++;
2102 if (consumer == (instance->max_fw_cmds + 1)) {
2103 consumer = 0;
2104 }
2105 }
2106
2107 *instance->consumer = cpu_to_le32(producer);
2108
2109 spin_unlock_irqrestore(&instance->completion_lock, flags);
2110
2111 /*
2112 * Check if we can restore can_queue
2113 */
2114 megasas_check_and_restore_queue_depth(instance);
2115 }
2116
2117 /**
2118 * megasas_start_timer - Initializes a timer object
2119 * @instance: Adapter soft state
2120 * @timer: timer object to be initialized
2121 * @fn: timer function
2122 * @interval: time interval between timer function call
2123 *
2124 */
2125 void megasas_start_timer(struct megasas_instance *instance,
2126 struct timer_list *timer,
2127 void *fn, unsigned long interval)
2128 {
2129 init_timer(timer);
2130 timer->expires = jiffies + interval;
2131 timer->data = (unsigned long)instance;
2132 timer->function = fn;
2133 add_timer(timer);
2134 }
2135
2136 static void
2137 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2138
2139 static void
2140 process_fw_state_change_wq(struct work_struct *work);
2141
2142 void megasas_do_ocr(struct megasas_instance *instance)
2143 {
2144 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2145 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2146 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2147 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2148 }
2149 instance->instancet->disable_intr(instance);
2150 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2151 instance->issuepend_done = 0;
2152
2153 atomic_set(&instance->fw_outstanding, 0);
2154 megasas_internal_reset_defer_cmds(instance);
2155 process_fw_state_change_wq(&instance->work_init);
2156 }
2157
2158 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2159 int initial)
2160 {
2161 struct megasas_cmd *cmd;
2162 struct megasas_dcmd_frame *dcmd;
2163 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2164 dma_addr_t new_affiliation_111_h;
2165 int ld, retval = 0;
2166 u8 thisVf;
2167
2168 cmd = megasas_get_cmd(instance);
2169
2170 if (!cmd) {
2171 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2172 "Failed to get cmd for scsi%d\n",
2173 instance->host->host_no);
2174 return -ENOMEM;
2175 }
2176
2177 dcmd = &cmd->frame->dcmd;
2178
2179 if (!instance->vf_affiliation_111) {
2180 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2181 "affiliation for scsi%d\n", instance->host->host_no);
2182 megasas_return_cmd(instance, cmd);
2183 return -ENOMEM;
2184 }
2185
2186 if (initial)
2187 memset(instance->vf_affiliation_111, 0,
2188 sizeof(struct MR_LD_VF_AFFILIATION_111));
2189 else {
2190 new_affiliation_111 =
2191 pci_alloc_consistent(instance->pdev,
2192 sizeof(struct MR_LD_VF_AFFILIATION_111),
2193 &new_affiliation_111_h);
2194 if (!new_affiliation_111) {
2195 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2196 "memory for new affiliation for scsi%d\n",
2197 instance->host->host_no);
2198 megasas_return_cmd(instance, cmd);
2199 return -ENOMEM;
2200 }
2201 memset(new_affiliation_111, 0,
2202 sizeof(struct MR_LD_VF_AFFILIATION_111));
2203 }
2204
2205 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2206
2207 dcmd->cmd = MFI_CMD_DCMD;
2208 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2209 dcmd->sge_count = 1;
2210 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2211 dcmd->timeout = 0;
2212 dcmd->pad_0 = 0;
2213 dcmd->data_xfer_len =
2214 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2215 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2216
2217 if (initial)
2218 dcmd->sgl.sge32[0].phys_addr =
2219 cpu_to_le32(instance->vf_affiliation_111_h);
2220 else
2221 dcmd->sgl.sge32[0].phys_addr =
2222 cpu_to_le32(new_affiliation_111_h);
2223
2224 dcmd->sgl.sge32[0].length = cpu_to_le32(
2225 sizeof(struct MR_LD_VF_AFFILIATION_111));
2226
2227 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2228 "scsi%d\n", instance->host->host_no);
2229
2230 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2231 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2232 " failed with status 0x%x for scsi%d\n",
2233 dcmd->cmd_status, instance->host->host_no);
2234 retval = 1; /* Do a scan if we couldn't get affiliation */
2235 goto out;
2236 }
2237
2238 if (!initial) {
2239 thisVf = new_affiliation_111->thisVf;
2240 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2241 if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2242 new_affiliation_111->map[ld].policy[thisVf]) {
2243 dev_warn(&instance->pdev->dev, "SR-IOV: "
2244 "Got new LD/VF affiliation for scsi%d\n",
2245 instance->host->host_no);
2246 memcpy(instance->vf_affiliation_111,
2247 new_affiliation_111,
2248 sizeof(struct MR_LD_VF_AFFILIATION_111));
2249 retval = 1;
2250 goto out;
2251 }
2252 }
2253 out:
2254 if (new_affiliation_111) {
2255 pci_free_consistent(instance->pdev,
2256 sizeof(struct MR_LD_VF_AFFILIATION_111),
2257 new_affiliation_111,
2258 new_affiliation_111_h);
2259 }
2260
2261 megasas_return_cmd(instance, cmd);
2262
2263 return retval;
2264 }
2265
2266 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2267 int initial)
2268 {
2269 struct megasas_cmd *cmd;
2270 struct megasas_dcmd_frame *dcmd;
2271 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2272 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2273 dma_addr_t new_affiliation_h;
2274 int i, j, retval = 0, found = 0, doscan = 0;
2275 u8 thisVf;
2276
2277 cmd = megasas_get_cmd(instance);
2278
2279 if (!cmd) {
2280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2281 "Failed to get cmd for scsi%d\n",
2282 instance->host->host_no);
2283 return -ENOMEM;
2284 }
2285
2286 dcmd = &cmd->frame->dcmd;
2287
2288 if (!instance->vf_affiliation) {
2289 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2290 "affiliation for scsi%d\n", instance->host->host_no);
2291 megasas_return_cmd(instance, cmd);
2292 return -ENOMEM;
2293 }
2294
2295 if (initial)
2296 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2297 sizeof(struct MR_LD_VF_AFFILIATION));
2298 else {
2299 new_affiliation =
2300 pci_alloc_consistent(instance->pdev,
2301 (MAX_LOGICAL_DRIVES + 1) *
2302 sizeof(struct MR_LD_VF_AFFILIATION),
2303 &new_affiliation_h);
2304 if (!new_affiliation) {
2305 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2306 "memory for new affiliation for scsi%d\n",
2307 instance->host->host_no);
2308 megasas_return_cmd(instance, cmd);
2309 return -ENOMEM;
2310 }
2311 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2312 sizeof(struct MR_LD_VF_AFFILIATION));
2313 }
2314
2315 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2316
2317 dcmd->cmd = MFI_CMD_DCMD;
2318 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2319 dcmd->sge_count = 1;
2320 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2321 dcmd->timeout = 0;
2322 dcmd->pad_0 = 0;
2323 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2324 sizeof(struct MR_LD_VF_AFFILIATION));
2325 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2326
2327 if (initial)
2328 dcmd->sgl.sge32[0].phys_addr =
2329 cpu_to_le32(instance->vf_affiliation_h);
2330 else
2331 dcmd->sgl.sge32[0].phys_addr =
2332 cpu_to_le32(new_affiliation_h);
2333
2334 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2335 sizeof(struct MR_LD_VF_AFFILIATION));
2336
2337 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2338 "scsi%d\n", instance->host->host_no);
2339
2340
2341 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2342 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2343 " failed with status 0x%x for scsi%d\n",
2344 dcmd->cmd_status, instance->host->host_no);
2345 retval = 1; /* Do a scan if we couldn't get affiliation */
2346 goto out;
2347 }
2348
2349 if (!initial) {
2350 if (!new_affiliation->ldCount) {
2351 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2352 "affiliation for passive path for scsi%d\n",
2353 instance->host->host_no);
2354 retval = 1;
2355 goto out;
2356 }
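/* First pass: walk the new map and request a rescan if any LD's policy
 * for this VF changed or a new LD became visible.
 */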
2357 newmap = new_affiliation->map;
2358 savedmap = instance->vf_affiliation->map;
2359 thisVf = new_affiliation->thisVf;
2360 for (i = 0 ; i < new_affiliation->ldCount; i++) {
2361 found = 0;
2362 for (j = 0; j < instance->vf_affiliation->ldCount;
2363 j++) {
2364 if (newmap->ref.targetId ==
2365 savedmap->ref.targetId) {
2366 found = 1;
2367 if (newmap->policy[thisVf] !=
2368 savedmap->policy[thisVf]) {
2369 doscan = 1;
2370 goto out;
2371 }
2372 }
2373 savedmap = (struct MR_LD_VF_MAP *)
2374 ((unsigned char *)savedmap +
2375 savedmap->size);
2376 }
2377 if (!found && newmap->policy[thisVf] !=
2378 MR_LD_ACCESS_HIDDEN) {
2379 doscan = 1;
2380 goto out;
2381 }
2382 newmap = (struct MR_LD_VF_MAP *)
2383 ((unsigned char *)newmap + newmap->size);
2384 }
2385
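/* Second pass: walk the saved map to catch LDs that are no longer
 * visible (or are now hidden) for this VF.
 */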
2386 newmap = new_affiliation->map;
2387 savedmap = instance->vf_affiliation->map;
2388
2389 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2390 found = 0;
2391 for (j = 0 ; j < new_affiliation->ldCount; j++) {
2392 if (savedmap->ref.targetId ==
2393 newmap->ref.targetId) {
2394 found = 1;
2395 if (savedmap->policy[thisVf] !=
2396 newmap->policy[thisVf]) {
2397 doscan = 1;
2398 goto out;
2399 }
2400 }
2401 newmap = (struct MR_LD_VF_MAP *)
2402 ((unsigned char *)newmap +
2403 newmap->size);
2404 }
2405 if (!found && savedmap->policy[thisVf] !=
2406 MR_LD_ACCESS_HIDDEN) {
2407 doscan = 1;
2408 goto out;
2409 }
2410 savedmap = (struct MR_LD_VF_MAP *)
2411 ((unsigned char *)savedmap +
2412 savedmap->size);
2413 }
2414 }
2415 out:
2416 if (doscan) {
2417 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2418 "affiliation for scsi%d\n", instance->host->host_no);
2419 memcpy(instance->vf_affiliation, new_affiliation,
2420 new_affiliation->size);
2421 retval = 1;
2422 }
2423
2424 if (new_affiliation)
2425 pci_free_consistent(instance->pdev,
2426 (MAX_LOGICAL_DRIVES + 1) *
2427 sizeof(struct MR_LD_VF_AFFILIATION),
2428 new_affiliation, new_affiliation_h);
2429 megasas_return_cmd(instance, cmd);
2430
2431 return retval;
2432 }
2433
2434 /* This function will get the current SR-IOV LD/VF affiliation */
2435 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2436 int initial)
2437 {
2438 int retval;
2439
2440 if (instance->PlasmaFW111)
2441 retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2442 else
2443 retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2444 return retval;
2445 }
2446
2447 /* This function will tell FW to start the SR-IOV heartbeat */
2448 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2449 int initial)
2450 {
2451 struct megasas_cmd *cmd;
2452 struct megasas_dcmd_frame *dcmd;
2453 int retval = 0;
2454
2455 cmd = megasas_get_cmd(instance);
2456
2457 if (!cmd) {
2458 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2459 "Failed to get cmd for scsi%d\n",
2460 instance->host->host_no);
2461 return -ENOMEM;
2462 }
2463
2464 dcmd = &cmd->frame->dcmd;
2465
2466 if (initial) {
2467 instance->hb_host_mem =
2468 pci_zalloc_consistent(instance->pdev,
2469 sizeof(struct MR_CTRL_HB_HOST_MEM),
2470 &instance->hb_host_mem_h);
2471 if (!instance->hb_host_mem) {
2472 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2473 " memory for heartbeat host memory for scsi%d\n",
2474 instance->host->host_no);
2475 retval = -ENOMEM;
2476 goto out;
2477 }
2478 }
2479
2480 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2481
2482 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2483 dcmd->cmd = MFI_CMD_DCMD;
2484 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2485 dcmd->sge_count = 1;
2486 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2487 dcmd->timeout = 0;
2488 dcmd->pad_0 = 0;
2489 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2490 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2491 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
2492 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2493
2494 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2495 instance->host->host_no);
2496
2497 if ((instance->adapter_type != MFI_SERIES) &&
2498 !instance->mask_interrupts)
2499 retval = megasas_issue_blocked_cmd(instance, cmd,
2500 MEGASAS_ROUTINE_WAIT_TIME_VF);
2501 else
2502 retval = megasas_issue_polled(instance, cmd);
2503
2504 if (retval) {
2505 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2506 "_MEM_ALLOC DCMD %s for scsi%d\n",
2507 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2508 "timed out" : "failed", instance->host->host_no);
2509 retval = 1;
2510 }
2511
2512 out:
2513 megasas_return_cmd(instance, cmd);
2514
2515 return retval;
2516 }
2517
2518 /* Handler for SR-IOV heartbeat */
2519 void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
2520 {
2521 struct megasas_instance *instance =
2522 (struct megasas_instance *)instance_addr;
2523
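/* FW advances fwCounter while healthy; if it has not moved since the
 * driver last mirrored it into driverCounter, treat the FW as hung and
 * schedule recovery work.
 */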
2524 if (instance->hb_host_mem->HB.fwCounter !=
2525 instance->hb_host_mem->HB.driverCounter) {
2526 instance->hb_host_mem->HB.driverCounter =
2527 instance->hb_host_mem->HB.fwCounter;
2528 mod_timer(&instance->sriov_heartbeat_timer,
2529 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2530 } else {
2531 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2532 "completed for scsi%d\n", instance->host->host_no);
2533 schedule_work(&instance->work_init);
2534 }
2535 }
2536
2537 /**
2538 * megasas_wait_for_outstanding - Wait for all outstanding cmds
2539 * @instance: Adapter soft state
2540 *
2541 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2542 * complete all its outstanding commands. Returns error if one or more IOs
2543 * are pending after this time period. It also marks the controller dead.
2544 */
2545 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2546 {
2547 int i, sl, outstanding;
2548 u32 reset_index;
2549 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2550 unsigned long flags;
2551 struct list_head clist_local;
2552 struct megasas_cmd *reset_cmd;
2553 u32 fw_state;
2554
2555 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2556 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2557 __func__, __LINE__);
2558 return FAILED;
2559 }
2560
2561 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2562
2563 INIT_LIST_HEAD(&clist_local);
2564 spin_lock_irqsave(&instance->hba_lock, flags);
2565 list_splice_init(&instance->internal_reset_pending_q,
2566 &clist_local);
2567 spin_unlock_irqrestore(&instance->hba_lock, flags);
2568
2569 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2570 for (i = 0; i < wait_time; i++) {
2571 msleep(1000);
2572 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2573 break;
2574 }
2575
2576 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2577 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2578 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2579 return FAILED;
2580 }
2581
2582 reset_index = 0;
2583 while (!list_empty(&clist_local)) {
2584 reset_cmd = list_entry((&clist_local)->next,
2585 struct megasas_cmd, list);
2586 list_del_init(&reset_cmd->list);
2587 if (reset_cmd->scmd) {
2588 reset_cmd->scmd->result = DID_REQUEUE << 16;
2589 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2590 reset_index, reset_cmd,
2591 reset_cmd->scmd->cmnd[0]);
2592
2593 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2594 megasas_return_cmd(instance, reset_cmd);
2595 } else if (reset_cmd->sync_cmd) {
2596 dev_notice(&instance->pdev->dev, "%p synch cmds "
2597 "reset queue\n",
2598 reset_cmd);
2599
2600 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2601 instance->instancet->fire_cmd(instance,
2602 reset_cmd->frame_phys_addr,
2603 0, instance->reg_set);
2604 } else {
2605 dev_notice(&instance->pdev->dev, "%p unexpected "
2606 "cmds list\n",
2607 reset_cmd);
2608 }
2609 reset_index++;
2610 }
2611
2612 return SUCCESS;
2613 }
2614
2615 for (i = 0; i < resetwaittime; i++) {
2616 outstanding = atomic_read(&instance->fw_outstanding);
2617
2618 if (!outstanding)
2619 break;
2620
2621 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2622 dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
2623 "commands to complete\n", i, outstanding);
2624 /*
2625  * Call the cmd completion routine. Cmds are
2626  * completed directly without depending on the ISR.
2627  */
2628 megasas_complete_cmd_dpc((unsigned long)instance);
2629 }
2630
2631 msleep(1000);
2632 }
2633
2634 i = 0;
2635 outstanding = atomic_read(&instance->fw_outstanding);
2636 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2637
2638 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2639 goto no_outstanding;
2640
2641 if (instance->disableOnlineCtrlReset)
2642 goto kill_hba_and_failed;
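/* Retry OCR while the FW is faulted or I/O is still outstanding; give up
 * and kill the HBA after a few attempts.
 */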
2643 do {
2644 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2645 dev_info(&instance->pdev->dev,
2646 "%s:%d waiting_for_outstanding: before issuing OCR. FW state = 0x%x, outstanding 0x%x\n",
2647 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2648 if (i == 3)
2649 goto kill_hba_and_failed;
2650 megasas_do_ocr(instance);
2651
2652 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2653 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2654 __func__, __LINE__);
2655 return FAILED;
2656 }
2657 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issuing OCR.\n",
2658 __func__, __LINE__);
2659
2660 for (sl = 0; sl < 10; sl++)
2661 msleep(500);
2662
2663 outstanding = atomic_read(&instance->fw_outstanding);
2664
2665 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2666 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2667 goto no_outstanding;
2668 }
2669 i++;
2670 } while (i <= 3);
2671
2672 no_outstanding:
2673
2674 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2675 __func__, __LINE__);
2676 return SUCCESS;
2677
2678 kill_hba_and_failed:
2679
2680 /* Reset not supported, kill adapter */
2681 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2682 " disableOnlineCtrlReset %d fw_outstanding %d \n",
2683 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2684 atomic_read(&instance->fw_outstanding));
2685 megasas_dump_pending_frames(instance);
2686 megaraid_sas_kill_hba(instance);
2687
2688 return FAILED;
2689 }
2690
2691 /**
2692 * megasas_generic_reset - Generic reset routine
2693 * @scmd: Mid-layer SCSI command
2694 *
2695 * This routine implements a generic reset handler for device, bus and host
2696 * reset requests. Device, bus and host specific reset handlers can use this
2697 * function after they do their specific tasks.
2698 */
2699 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2700 {
2701 int ret_val;
2702 struct megasas_instance *instance;
2703
2704 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2705
2706 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2707 scmd->cmnd[0], scmd->retries);
2708
2709 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2710 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2711 return FAILED;
2712 }
2713
2714 ret_val = megasas_wait_for_outstanding(instance);
2715 if (ret_val == SUCCESS)
2716 dev_notice(&instance->pdev->dev, "reset successful\n");
2717 else
2718 dev_err(&instance->pdev->dev, "failed to do reset\n");
2719
2720 return ret_val;
2721 }
2722
2723 /**
2724 * megasas_reset_timer - quiesce the adapter if required
2725 * @scmd: scsi cmnd
2726 *
2727 * Sets the FW busy flag and reduces the host->can_queue if the
2728 * cmd has not been completed within the timeout period.
2729 */
2730 static enum
2731 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2732 {
2733 struct megasas_instance *instance;
2734 unsigned long flags;
2735
2736 if (time_after(jiffies, scmd->jiffies_at_alloc +
2737 (scmd_timeout * 2) * HZ)) {
2738 return BLK_EH_NOT_HANDLED;
2739 }
2740
2741 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2742 if (!(instance->flag & MEGASAS_FW_BUSY)) {
2743 /* Assume FW is busy (cmd timed out); start throttling I/O */
2744 spin_lock_irqsave(instance->host->host_lock, flags);
2745
2746 instance->host->can_queue = instance->throttlequeuedepth;
2747 instance->last_time = jiffies;
2748 instance->flag |= MEGASAS_FW_BUSY;
2749
2750 spin_unlock_irqrestore(instance->host->host_lock, flags);
2751 }
2752 return BLK_EH_RESET_TIMER;
2753 }
2754
2755 /**
2756 * megasas_dump_frame - This function will dump MPT/MFI frame
2757 */
2758 static inline void
2759 megasas_dump_frame(void *mpi_request, int sz)
2760 {
2761 int i;
2762 __le32 *mfp = (__le32 *)mpi_request;
2763
2764 printk(KERN_INFO "IO request frame:\n\t");
2765 for (i = 0; i < sz / sizeof(__le32); i++) {
2766 if (i && ((i % 8) == 0))
2767 printk("\n\t");
2768 printk("%08x ", le32_to_cpu(mfp[i]));
2769 }
2770 printk("\n");
2771 }
2772
2773 /**
2774 * megasas_reset_bus_host - Bus & host reset handler entry point
2775 */
2776 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2777 {
2778 int ret;
2779 struct megasas_instance *instance;
2780
2781 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2782
2783 scmd_printk(KERN_INFO, scmd,
2784 "Controller reset is requested due to IO timeout\n"
2785 "SCSI command pointer: (%p)\t SCSI host state: %d\t"
2786 " SCSI host busy: %d\t FW outstanding: %d\n",
2787 scmd, scmd->device->host->shost_state,
2788 atomic_read((atomic_t *)&scmd->device->host->host_busy),
2789 atomic_read(&instance->fw_outstanding));
2790
2791 /*
2792 * First wait for all commands to complete
2793 */
2794 if (instance->adapter_type == MFI_SERIES) {
2795 ret = megasas_generic_reset(scmd);
2796 } else {
2797 struct megasas_cmd_fusion *cmd;
2798 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2799 if (cmd)
2800 megasas_dump_frame(cmd->io_request,
2801 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
2802 ret = megasas_reset_fusion(scmd->device->host,
2803 SCSIIO_TIMEOUT_OCR);
2804 }
2805
2806 return ret;
2807 }
2808
2809 /**
2810 * megasas_task_abort - Issues task abort request to firmware
2811 * (supported only for fusion adapters)
2812 * @scmd: SCSI command pointer
2813 */
2814 static int megasas_task_abort(struct scsi_cmnd *scmd)
2815 {
2816 int ret;
2817 struct megasas_instance *instance;
2818
2819 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2820
2821 if (instance->adapter_type != MFI_SERIES)
2822 ret = megasas_task_abort_fusion(scmd);
2823 else {
2824 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2825 ret = FAILED;
2826 }
2827
2828 return ret;
2829 }
2830
2831 /**
2832 * megasas_reset_target: Issues target reset request to firmware
2833 * (supported only for fusion adapters)
2834 * @scmd: SCSI command pointer
2835 */
2836 static int megasas_reset_target(struct scsi_cmnd *scmd)
2837 {
2838 int ret;
2839 struct megasas_instance *instance;
2840
2841 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2842
2843 if (instance->adapter_type != MFI_SERIES)
2844 ret = megasas_reset_target_fusion(scmd);
2845 else {
2846 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2847 ret = FAILED;
2848 }
2849
2850 return ret;
2851 }
2852
2853 /**
2854 * megasas_bios_param - Returns disk geometry for a disk
2855 * @sdev: device handle
2856 * @bdev: block device
2857 * @capacity: drive capacity
2858 * @geom: geometry parameters
2859 */
2860 static int
2861 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2862 sector_t capacity, int geom[])
2863 {
2864 int heads;
2865 int sectors;
2866 sector_t cylinders;
2867 unsigned long tmp;
2868
2869 /* Default heads (64) & sectors (32) */
2870 heads = 64;
2871 sectors = 32;
2872
2873 tmp = heads * sectors;
2874 cylinders = capacity;
2875
2876 sector_div(cylinders, tmp);
2877
2878 /*
2879 * Handle extended translation size for logical drives > 1Gb
2880 */
2881
2882 if (capacity >= 0x200000) {
2883 heads = 255;
2884 sectors = 63;
2885 tmp = heads*sectors;
2886 cylinders = capacity;
2887 sector_div(cylinders, tmp);
2888 }
2889
2890 geom[0] = heads;
2891 geom[1] = sectors;
2892 geom[2] = cylinders;
2893
2894 return 0;
2895 }
2896
2897 static void megasas_aen_polling(struct work_struct *work);
2898
2899 /**
2900 * megasas_service_aen - Processes an event notification
2901 * @instance: Adapter soft state
2902 * @cmd: AEN command completed by the ISR
2903 *
2904 * For AEN, driver sends a command down to FW that is held by the FW till an
2905 * event occurs. When an event of interest occurs, FW completes the command
2906 * that it was previously holding.
2907 *
2908  * This routine sends a SIGIO signal to processes that have registered with the
2909 * driver for AEN.
2910 */
2911 static void
2912 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2913 {
2914 unsigned long flags;
2915
2916 /*
2917 * Don't signal app if it is just an aborted previously registered aen
2918 */
2919 if ((!cmd->abort_aen) && (instance->unload == 0)) {
2920 spin_lock_irqsave(&poll_aen_lock, flags);
2921 megasas_poll_wait_aen = 1;
2922 spin_unlock_irqrestore(&poll_aen_lock, flags);
2923 wake_up(&megasas_poll_wait);
2924 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2925 }
2926 else
2927 cmd->abort_aen = 0;
2928
2929 instance->aen_cmd = NULL;
2930
2931 megasas_return_cmd(instance, cmd);
2932
2933 if ((instance->unload == 0) &&
2934 ((instance->issuepend_done == 1))) {
2935 struct megasas_aen_event *ev;
2936
2937 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2938 if (!ev) {
2939 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2940 } else {
2941 ev->instance = instance;
2942 instance->ev = ev;
2943 INIT_DELAYED_WORK(&ev->hotplug_work,
2944 megasas_aen_polling);
2945 schedule_delayed_work(&ev->hotplug_work, 0);
2946 }
2947 }
2948 }
2949
2950 static ssize_t
2951 megasas_fw_crash_buffer_store(struct device *cdev,
2952 struct device_attribute *attr, const char *buf, size_t count)
2953 {
2954 struct Scsi_Host *shost = class_to_shost(cdev);
2955 struct megasas_instance *instance =
2956 (struct megasas_instance *) shost->hostdata;
2957 int val = 0;
2958 unsigned long flags;
2959
2960 if (kstrtoint(buf, 0, &val) != 0)
2961 return -EINVAL;
2962
2963 spin_lock_irqsave(&instance->crashdump_lock, flags);
2964 instance->fw_crash_buffer_offset = val;
2965 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2966 return strlen(buf);
2967 }
2968
2969 static ssize_t
2970 megasas_fw_crash_buffer_show(struct device *cdev,
2971 struct device_attribute *attr, char *buf)
2972 {
2973 struct Scsi_Host *shost = class_to_shost(cdev);
2974 struct megasas_instance *instance =
2975 (struct megasas_instance *) shost->hostdata;
2976 u32 size;
2977 unsigned long buff_addr;
2978 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
2979 unsigned long chunk_left_bytes;
2980 unsigned long src_addr;
2981 unsigned long flags;
2982 u32 buff_offset;
2983
2984 spin_lock_irqsave(&instance->crashdump_lock, flags);
2985 buff_offset = instance->fw_crash_buffer_offset;
2986 if (!instance->crash_dump_buf &&
2987 !((instance->fw_crash_state == AVAILABLE) ||
2988 (instance->fw_crash_state == COPYING))) {
2989 dev_err(&instance->pdev->dev,
2990 "Firmware crash dump is not available\n");
2991 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2992 return -EINVAL;
2993 }
2994
2995 buff_addr = (unsigned long) buf;
2996
2997 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
2998 dev_err(&instance->pdev->dev,
2999 "Firmware crash dump offset is out of range\n");
3000 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3001 return 0;
3002 }
3003
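/* The crash dump is stored in CRASH_DMA_BUF_SIZE chunks; clamp the copy
 * to the remainder of the current chunk and to one sysfs page.
 */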
3004 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3005 chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3006 size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3007 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3008
3009 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3010 (buff_offset % dmachunk);
3011 memcpy(buf, (void *)src_addr, size);
3012 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3013
3014 return size;
3015 }
3016
3017 static ssize_t
3018 megasas_fw_crash_buffer_size_show(struct device *cdev,
3019 struct device_attribute *attr, char *buf)
3020 {
3021 struct Scsi_Host *shost = class_to_shost(cdev);
3022 struct megasas_instance *instance =
3023 (struct megasas_instance *) shost->hostdata;
3024
3025 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3026 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3027 }
3028
3029 static ssize_t
3030 megasas_fw_crash_state_store(struct device *cdev,
3031 struct device_attribute *attr, const char *buf, size_t count)
3032 {
3033 struct Scsi_Host *shost = class_to_shost(cdev);
3034 struct megasas_instance *instance =
3035 (struct megasas_instance *) shost->hostdata;
3036 int val = 0;
3037 unsigned long flags;
3038
3039 if (kstrtoint(buf, 0, &val) != 0)
3040 return -EINVAL;
3041
3042 if ((val <= AVAILABLE || val > COPY_ERROR)) {
3043 dev_err(&instance->pdev->dev, "application updates invalid "
3044 "firmware crash state\n");
3045 return -EINVAL;
3046 }
3047
3048 instance->fw_crash_state = val;
3049
3050 if ((val == COPIED) || (val == COPY_ERROR)) {
3051 spin_lock_irqsave(&instance->crashdump_lock, flags);
3052 megasas_free_host_crash_buffer(instance);
3053 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3054 if (val == COPY_ERROR)
3055 dev_info(&instance->pdev->dev, "application failed to "
3056 "copy Firmware crash dump\n");
3057 else
3058 dev_info(&instance->pdev->dev, "Firmware crash dump "
3059 "copied successfully\n");
3060 }
3061 return strlen(buf);
3062 }
3063
3064 static ssize_t
3065 megasas_fw_crash_state_show(struct device *cdev,
3066 struct device_attribute *attr, char *buf)
3067 {
3068 struct Scsi_Host *shost = class_to_shost(cdev);
3069 struct megasas_instance *instance =
3070 (struct megasas_instance *) shost->hostdata;
3071
3072 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3073 }
3074
3075 static ssize_t
3076 megasas_page_size_show(struct device *cdev,
3077 struct device_attribute *attr, char *buf)
3078 {
3079 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3080 }
3081
3082 static ssize_t
3083 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3084 char *buf)
3085 {
3086 struct Scsi_Host *shost = class_to_shost(cdev);
3087 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3088
3089 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3090 }
3091
3092 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
3093 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
3094 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
3095 megasas_fw_crash_buffer_size_show, NULL);
3096 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
3097 megasas_fw_crash_state_show, megasas_fw_crash_state_store);
3098 static DEVICE_ATTR(page_size, S_IRUGO,
3099 megasas_page_size_show, NULL);
3100 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
3101 megasas_ldio_outstanding_show, NULL);
3102
3103 struct device_attribute *megaraid_host_attrs[] = {
3104 &dev_attr_fw_crash_buffer_size,
3105 &dev_attr_fw_crash_buffer,
3106 &dev_attr_fw_crash_state,
3107 &dev_attr_page_size,
3108 &dev_attr_ldio_outstanding,
3109 NULL,
3110 };
3111
3112 /*
3113 * Scsi host template for megaraid_sas driver
3114 */
3115 static struct scsi_host_template megasas_template = {
3116
3117 .module = THIS_MODULE,
3118 .name = "Avago SAS based MegaRAID driver",
3119 .proc_name = "megaraid_sas",
3120 .slave_configure = megasas_slave_configure,
3121 .slave_alloc = megasas_slave_alloc,
3122 .slave_destroy = megasas_slave_destroy,
3123 .queuecommand = megasas_queue_command,
3124 .eh_target_reset_handler = megasas_reset_target,
3125 .eh_abort_handler = megasas_task_abort,
3126 .eh_host_reset_handler = megasas_reset_bus_host,
3127 .eh_timed_out = megasas_reset_timer,
3128 .shost_attrs = megaraid_host_attrs,
3129 .bios_param = megasas_bios_param,
3130 .use_clustering = ENABLE_CLUSTERING,
3131 .change_queue_depth = scsi_change_queue_depth,
3132 .no_write_same = 1,
3133 };
3134
3135 /**
3136 * megasas_complete_int_cmd - Completes an internal command
3137 * @instance: Adapter soft state
3138 * @cmd: Command to be completed
3139 *
3140 * The megasas_issue_blocked_cmd() function waits for a command to complete
3141 * after it issues a command. This function wakes up that waiting routine by
3142 * calling wake_up() on the wait queue.
3143 */
3144 static void
3145 megasas_complete_int_cmd(struct megasas_instance *instance,
3146 struct megasas_cmd *cmd)
3147 {
3148 cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3149 wake_up(&instance->int_cmd_wait_q);
3150 }
3151
3152 /**
3153 * megasas_complete_abort - Completes aborting a command
3154 * @instance: Adapter soft state
3155 * @cmd: Cmd that was issued to abort another cmd
3156 *
3157 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3158 * after it issues an abort on a previously issued command. This function
3159 * wakes up all functions waiting on the same wait queue.
3160 */
3161 static void
3162 megasas_complete_abort(struct megasas_instance *instance,
3163 struct megasas_cmd *cmd)
3164 {
3165 if (cmd->sync_cmd) {
3166 cmd->sync_cmd = 0;
3167 cmd->cmd_status_drv = 0;
3168 wake_up(&instance->abort_cmd_wait_q);
3169 }
3170 }
3171
3172 /**
3173 * megasas_complete_cmd - Completes a command
3174 * @instance: Adapter soft state
3175 * @cmd: Command to be completed
3176 * @alt_status: If non-zero, use this value as status to
3177 * SCSI mid-layer instead of the value returned
3178 * by the FW. This should be used if caller wants
3179 * an alternate status (as in the case of aborted
3180 * commands)
3181 */
3182 void
3183 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3184 u8 alt_status)
3185 {
3186 int exception = 0;
3187 struct megasas_header *hdr = &cmd->frame->hdr;
3188 unsigned long flags;
3189 struct fusion_context *fusion = instance->ctrl_context;
3190 u32 opcode, status;
3191
3192 /* flag for the retry reset */
3193 cmd->retry_for_fw_reset = 0;
3194
3195 if (cmd->scmd)
3196 cmd->scmd->SCp.ptr = NULL;
3197
3198 switch (hdr->cmd) {
3199 case MFI_CMD_INVALID:
3200 /* Some older 1068 controller FW may keep a pending
3201 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3202 when booting the kdump kernel. Ignore this command to
3203 prevent a kernel panic on shutdown of the kdump kernel. */
3204 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3205 "completed\n");
3206 dev_warn(&instance->pdev->dev, "If you have a controller "
3207 "other than PERC5, please upgrade your firmware\n");
3208 break;
3209 case MFI_CMD_PD_SCSI_IO:
3210 case MFI_CMD_LD_SCSI_IO:
3211
3212 /*
3213 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3214 * issued either through an IO path or an IOCTL path. If it
3215 * was via IOCTL, we will send it to internal completion.
3216 */
3217 if (cmd->sync_cmd) {
3218 cmd->sync_cmd = 0;
3219 megasas_complete_int_cmd(instance, cmd);
3220 break;
3221 }
3222
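/* fall through - non-ioctl SCSI I/O completions are handled together
 * with LD read/write below.
 */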
3223 case MFI_CMD_LD_READ:
3224 case MFI_CMD_LD_WRITE:
3225
3226 if (alt_status) {
3227 cmd->scmd->result = alt_status << 16;
3228 exception = 1;
3229 }
3230
3231 if (exception) {
3232
3233 atomic_dec(&instance->fw_outstanding);
3234
3235 scsi_dma_unmap(cmd->scmd);
3236 cmd->scmd->scsi_done(cmd->scmd);
3237 megasas_return_cmd(instance, cmd);
3238
3239 break;
3240 }
3241
3242 switch (hdr->cmd_status) {
3243
3244 case MFI_STAT_OK:
3245 cmd->scmd->result = DID_OK << 16;
3246 break;
3247
3248 case MFI_STAT_SCSI_IO_FAILED:
3249 case MFI_STAT_LD_INIT_IN_PROGRESS:
3250 cmd->scmd->result =
3251 (DID_ERROR << 16) | hdr->scsi_status;
3252 break;
3253
3254 case MFI_STAT_SCSI_DONE_WITH_ERROR:
3255
3256 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3257
3258 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3259 memset(cmd->scmd->sense_buffer, 0,
3260 SCSI_SENSE_BUFFERSIZE);
3261 memcpy(cmd->scmd->sense_buffer, cmd->sense,
3262 hdr->sense_len);
3263
3264 cmd->scmd->result |= DRIVER_SENSE << 24;
3265 }
3266
3267 break;
3268
3269 case MFI_STAT_LD_OFFLINE:
3270 case MFI_STAT_DEVICE_NOT_FOUND:
3271 cmd->scmd->result = DID_BAD_TARGET << 16;
3272 break;
3273
3274 default:
3275 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3276 hdr->cmd_status);
3277 cmd->scmd->result = DID_ERROR << 16;
3278 break;
3279 }
3280
3281 atomic_dec(&instance->fw_outstanding);
3282
3283 scsi_dma_unmap(cmd->scmd);
3284 cmd->scmd->scsi_done(cmd->scmd);
3285 megasas_return_cmd(instance, cmd);
3286
3287 break;
3288
3289 case MFI_CMD_SMP:
3290 case MFI_CMD_STP:
3291 case MFI_CMD_DCMD:
3292 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3293 /* Check for LD map update */
3294 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3295 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3296 fusion->fast_path_io = 0;
3297 spin_lock_irqsave(instance->host->host_lock, flags);
3298 instance->map_update_cmd = NULL;
3299 if (cmd->frame->hdr.cmd_status != 0) {
3300 if (cmd->frame->hdr.cmd_status !=
3301 MFI_STAT_NOT_FOUND)
3302 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3303 cmd->frame->hdr.cmd_status);
3304 else {
3305 megasas_return_cmd(instance, cmd);
3306 spin_unlock_irqrestore(
3307 instance->host->host_lock,
3308 flags);
3309 break;
3310 }
3311 } else
3312 instance->map_id++;
3313 megasas_return_cmd(instance, cmd);
3314
3315 /*
3316 * Set fast path IO to ZERO.
3317 * Validate Map will set proper value.
3318 * Meanwhile all IOs will go as LD IO.
3319 */
3320 if (MR_ValidateMapInfo(instance))
3321 fusion->fast_path_io = 1;
3322 else
3323 fusion->fast_path_io = 0;
3324 megasas_sync_map_info(instance);
3325 spin_unlock_irqrestore(instance->host->host_lock,
3326 flags);
3327 break;
3328 }
3329 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3330 opcode == MR_DCMD_CTRL_EVENT_GET) {
3331 spin_lock_irqsave(&poll_aen_lock, flags);
3332 megasas_poll_wait_aen = 0;
3333 spin_unlock_irqrestore(&poll_aen_lock, flags);
3334 }
3335
3336 /* FW has an updated PD sequence */
3337 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3338 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3339
3340 spin_lock_irqsave(instance->host->host_lock, flags);
3341 status = cmd->frame->hdr.cmd_status;
3342 instance->jbod_seq_cmd = NULL;
3343 megasas_return_cmd(instance, cmd);
3344
3345 if (status == MFI_STAT_OK) {
3346 instance->pd_seq_map_id++;
3347 /* Re-register a pd sync seq num cmd */
3348 if (megasas_sync_pd_seq_num(instance, true))
3349 instance->use_seqnum_jbod_fp = false;
3350 } else
3351 instance->use_seqnum_jbod_fp = false;
3352
3353 spin_unlock_irqrestore(instance->host->host_lock, flags);
3354 break;
3355 }
3356
3357 /*
3358  * See if we got an event notification
3359 */
3360 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3361 megasas_service_aen(instance, cmd);
3362 else
3363 megasas_complete_int_cmd(instance, cmd);
3364
3365 break;
3366
3367 case MFI_CMD_ABORT:
3368 /*
3369 * Cmd issued to abort another cmd returned
3370 */
3371 megasas_complete_abort(instance, cmd);
3372 break;
3373
3374 default:
3375 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3376 hdr->cmd);
3377 break;
3378 }
3379 }
3380
3381 /**
3382 * megasas_issue_pending_cmds_again - issue all pending cmds
3383 * in FW again because of the fw reset
3384 * @instance: Adapter soft state
3385 */
3386 static inline void
3387 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3388 {
3389 struct megasas_cmd *cmd;
3390 struct list_head clist_local;
3391 union megasas_evt_class_locale class_locale;
3392 unsigned long flags;
3393 u32 seq_num;
3394
3395 INIT_LIST_HEAD(&clist_local);
3396 spin_lock_irqsave(&instance->hba_lock, flags);
3397 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3398 spin_unlock_irqrestore(&instance->hba_lock, flags);
3399
3400 while (!list_empty(&clist_local)) {
3401 cmd = list_entry((&clist_local)->next,
3402 struct megasas_cmd, list);
3403 list_del_init(&cmd->list);
3404
3405 if (cmd->sync_cmd || cmd->scmd) {
3406 dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3407 "detected to be pending during HBA reset\n",
3408 cmd, cmd->scmd, cmd->sync_cmd);
3409
3410 cmd->retry_for_fw_reset++;
3411
3412 if (cmd->retry_for_fw_reset == 3) {
3413 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
3414 "was tried multiple times during reset. "
3415 "Shutting down the HBA\n",
3416 cmd, cmd->scmd, cmd->sync_cmd);
3417 instance->instancet->disable_intr(instance);
3418 atomic_set(&instance->fw_reset_no_pci_access, 1);
3419 megaraid_sas_kill_hba(instance);
3420 return;
3421 }
3422 }
3423
3424 if (cmd->sync_cmd == 1) {
3425 if (cmd->scmd) {
3426 dev_notice(&instance->pdev->dev, "unexpected "
3427 "cmd attached to internal command!\n");
3428 }
3429 dev_notice(&instance->pdev->dev, "%p synchronous cmd "
3430 "on the internal reset queue, "
3431 "issue it again.\n", cmd);
3432 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3433 instance->instancet->fire_cmd(instance,
3434 cmd->frame_phys_addr,
3435 0, instance->reg_set);
3436 } else if (cmd->scmd) {
3437 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3438 "detected on the internal queue, issue again.\n",
3439 cmd, cmd->scmd->cmnd[0]);
3440
3441 atomic_inc(&instance->fw_outstanding);
3442 instance->instancet->fire_cmd(instance,
3443 cmd->frame_phys_addr,
3444 cmd->frame_count-1, instance->reg_set);
3445 } else {
3446 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
3447 "internal reset defer list while re-issuing\n",
3448 cmd);
3449 }
3450 }
3451
3452 if (instance->aen_cmd) {
3453 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3454 megasas_return_cmd(instance, instance->aen_cmd);
3455
3456 instance->aen_cmd = NULL;
3457 }
3458
3459 /*
3460 * Initiate AEN (Asynchronous Event Notification)
3461 */
3462 seq_num = instance->last_seq_num;
3463 class_locale.members.reserved = 0;
3464 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3465 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3466
3467 megasas_register_aen(instance, seq_num, class_locale.word);
3468 }
3469
3470 /**
3471 * megasas_internal_reset_defer_cmds - Move the internal reset pending
3472 * commands to a deferred queue.
3473 *
3474 * We move the commands pending at internal reset time to a pending
3475 * queue. This queue is flushed after successful completion of the
3476 * internal reset sequence. If the internal reset does not complete in
3477 * time, the kernel reset handler flushes these commands.
3478 **/
3479 static void
3480 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3481 {
3482 struct megasas_cmd *cmd;
3483 int i;
3484 u16 max_cmd = instance->max_fw_cmds;
3485 u32 defer_index;
3486 unsigned long flags;
3487
3488 defer_index = 0;
3489 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3490 for (i = 0; i < max_cmd; i++) {
3491 cmd = instance->cmd_list[i];
3492 if (cmd->sync_cmd == 1 || cmd->scmd) {
3493 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
3494 "on the defer queue as internal\n",
3495 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3496
3497 if (!list_empty(&cmd->list)) {
3498 dev_notice(&instance->pdev->dev, "ERROR while"
3499 " moving this cmd:%p, %d %p, it was"
3500 "discovered on some list?\n",
3501 cmd, cmd->sync_cmd, cmd->scmd);
3502
3503 list_del_init(&cmd->list);
3504 }
3505 defer_index++;
3506 list_add_tail(&cmd->list,
3507 &instance->internal_reset_pending_q);
3508 }
3509 }
3510 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3511 }
3512
3513
3514 static void
3515 process_fw_state_change_wq(struct work_struct *work)
3516 {
3517 struct megasas_instance *instance =
3518 container_of(work, struct megasas_instance, work_init);
3519 u32 wait;
3520 unsigned long flags;
3521
3522 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3523 dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3524 atomic_read(&instance->adprecovery));
3525 return ;
3526 }
3527
3528 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3529 dev_notice(&instance->pdev->dev, "FW detected to be in fault"
3530 "state, restarting it...\n");
3531
3532 instance->instancet->disable_intr(instance);
3533 atomic_set(&instance->fw_outstanding, 0);
3534
3535 atomic_set(&instance->fw_reset_no_pci_access, 1);
3536 instance->instancet->adp_reset(instance, instance->reg_set);
3537 atomic_set(&instance->fw_reset_no_pci_access, 0);
3538
3539 dev_notice(&instance->pdev->dev, "FW restarted successfully,"
3540 "initiating next stage...\n");
3541
3542 dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
3543 "state 2 starting...\n");
3544
3545 /* wait for about 30 seconds before starting the second init */
3546 for (wait = 0; wait < 30; wait++) {
3547 msleep(1000);
3548 }
3549
3550 if (megasas_transition_to_ready(instance, 1)) {
3551 dev_notice(&instance->pdev->dev, "adapter not ready\n");
3552
3553 atomic_set(&instance->fw_reset_no_pci_access, 1);
3554 megaraid_sas_kill_hba(instance);
3555 return ;
3556 }
3557
3558 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3559 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3560 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3561 ) {
3562 *instance->consumer = *instance->producer;
3563 } else {
3564 *instance->consumer = 0;
3565 *instance->producer = 0;
3566 }
3567
3568 megasas_issue_init_mfi(instance);
3569
3570 spin_lock_irqsave(&instance->hba_lock, flags);
3571 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3572 spin_unlock_irqrestore(&instance->hba_lock, flags);
3573 instance->instancet->enable_intr(instance);
3574
3575 megasas_issue_pending_cmds_again(instance);
3576 instance->issuepend_done = 1;
3577 }
3578 }
3579
3580 /**
3581 * megasas_deplete_reply_queue - Processes all completed commands
3582 * @instance: Adapter soft state
3583 * @alt_status: Alternate status to be returned to
3584 * SCSI mid-layer instead of the status
3585 * returned by the FW
3586 * Note: this must be called with hba lock held
3587 */
3588 static int
3589 megasas_deplete_reply_queue(struct megasas_instance *instance,
3590 u8 alt_status)
3591 {
3592 u32 mfiStatus;
3593 u32 fw_state;
3594
3595 if ((mfiStatus = instance->instancet->check_reset(instance,
3596 instance->reg_set)) == 1) {
3597 return IRQ_HANDLED;
3598 }
3599
3600 if ((mfiStatus = instance->instancet->clear_intr(
3601 instance->reg_set)
3602 ) == 0) {
3603 /* Hardware may not set outbound_intr_status in MSI-X mode */
3604 if (!instance->msix_vectors)
3605 return IRQ_NONE;
3606 }
3607
3608 instance->mfiStatus = mfiStatus;
3609
3610 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3611 fw_state = instance->instancet->read_fw_status_reg(
3612 instance->reg_set) & MFI_STATE_MASK;
3613
3614 if (fw_state != MFI_STATE_FAULT) {
3615 dev_notice(&instance->pdev->dev, "fw state:%x\n",
3616 fw_state);
3617 }
3618
3619 if ((fw_state == MFI_STATE_FAULT) &&
3620 (instance->disableOnlineCtrlReset == 0)) {
3621 dev_notice(&instance->pdev->dev, "wait adp restart\n");
3622
3623 if ((instance->pdev->device ==
3624 PCI_DEVICE_ID_LSI_SAS1064R) ||
3625 (instance->pdev->device ==
3626 PCI_DEVICE_ID_DELL_PERC5) ||
3627 (instance->pdev->device ==
3628 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3629
3630 *instance->consumer =
3631 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3632 }
3633
3634
3635 instance->instancet->disable_intr(instance);
3636 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3637 instance->issuepend_done = 0;
3638
3639 atomic_set(&instance->fw_outstanding, 0);
3640 megasas_internal_reset_defer_cmds(instance);
3641
3642 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3643 fw_state, atomic_read(&instance->adprecovery));
3644
3645 schedule_work(&instance->work_init);
3646 return IRQ_HANDLED;
3647
3648 } else {
3649 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3650 fw_state, instance->disableOnlineCtrlReset);
3651 }
3652 }
3653
3654 tasklet_schedule(&instance->isr_tasklet);
3655 return IRQ_HANDLED;
3656 }
3657 /**
3658 * megasas_isr - isr entry point
3659 */
3660 static irqreturn_t megasas_isr(int irq, void *devp)
3661 {
3662 struct megasas_irq_context *irq_context = devp;
3663 struct megasas_instance *instance = irq_context->instance;
3664 unsigned long flags;
3665 irqreturn_t rc;
3666
3667 if (atomic_read(&instance->fw_reset_no_pci_access))
3668 return IRQ_HANDLED;
3669
3670 spin_lock_irqsave(&instance->hba_lock, flags);
3671 rc = megasas_deplete_reply_queue(instance, DID_OK);
3672 spin_unlock_irqrestore(&instance->hba_lock, flags);
3673
3674 return rc;
3675 }
3676
3677 /**
3678 * megasas_transition_to_ready - Move the FW to READY state
3679 * @instance: Adapter soft state
3680 *
3681 * During initialization, the FW can potentially be in any one of
3682 * several possible states. If the FW is in the operational or
3683 * waiting-for-handshake state, the driver must take steps to bring it
3684 * to the ready state. Otherwise, it has to wait for the ready state.
3685 */
3686 int
3687 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3688 {
3689 int i;
3690 u8 max_wait;
3691 u32 fw_state;
3692 u32 cur_state;
3693 u32 abs_state, curr_abs_state;
3694
3695 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3696 fw_state = abs_state & MFI_STATE_MASK;
3697
3698 if (fw_state != MFI_STATE_READY)
3699 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3700 " state\n");
3701
3702 while (fw_state != MFI_STATE_READY) {
3703
3704 switch (fw_state) {
3705
3706 case MFI_STATE_FAULT:
3707 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3708 if (ocr) {
3709 max_wait = MEGASAS_RESET_WAIT_TIME;
3710 cur_state = MFI_STATE_FAULT;
3711 break;
3712 } else
3713 return -ENODEV;
3714
3715 case MFI_STATE_WAIT_HANDSHAKE:
3716 /*
3717 * Set the CLR bit in inbound doorbell
3718 */
3719 if ((instance->pdev->device ==
3720 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3721 (instance->pdev->device ==
3722 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3723 (instance->adapter_type != MFI_SERIES))
3724 writel(
3725 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3726 &instance->reg_set->doorbell);
3727 else
3728 writel(
3729 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3730 &instance->reg_set->inbound_doorbell);
3731
3732 max_wait = MEGASAS_RESET_WAIT_TIME;
3733 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3734 break;
3735
3736 case MFI_STATE_BOOT_MESSAGE_PENDING:
3737 if ((instance->pdev->device ==
3738 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3739 (instance->pdev->device ==
3740 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3741 (instance->adapter_type != MFI_SERIES))
3742 writel(MFI_INIT_HOTPLUG,
3743 &instance->reg_set->doorbell);
3744 else
3745 writel(MFI_INIT_HOTPLUG,
3746 &instance->reg_set->inbound_doorbell);
3747
3748 max_wait = MEGASAS_RESET_WAIT_TIME;
3749 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3750 break;
3751
3752 case MFI_STATE_OPERATIONAL:
3753 /*
3754 * Bring it to READY state; assuming max wait 10 secs
3755 */
3756 instance->instancet->disable_intr(instance);
3757 if ((instance->pdev->device ==
3758 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3759 (instance->pdev->device ==
3760 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3761 (instance->adapter_type != MFI_SERIES)) {
3762 writel(MFI_RESET_FLAGS,
3763 &instance->reg_set->doorbell);
3764
3765 if (instance->adapter_type != MFI_SERIES) {
3766 for (i = 0; i < (10 * 1000); i += 20) {
3767 if (readl(
3768 &instance->
3769 reg_set->
3770 doorbell) & 1)
3771 msleep(20);
3772 else
3773 break;
3774 }
3775 }
3776 } else
3777 writel(MFI_RESET_FLAGS,
3778 &instance->reg_set->inbound_doorbell);
3779
3780 max_wait = MEGASAS_RESET_WAIT_TIME;
3781 cur_state = MFI_STATE_OPERATIONAL;
3782 break;
3783
3784 case MFI_STATE_UNDEFINED:
3785 /*
3786 * This state should not last for more than 2 seconds
3787 */
3788 max_wait = MEGASAS_RESET_WAIT_TIME;
3789 cur_state = MFI_STATE_UNDEFINED;
3790 break;
3791
3792 case MFI_STATE_BB_INIT:
3793 max_wait = MEGASAS_RESET_WAIT_TIME;
3794 cur_state = MFI_STATE_BB_INIT;
3795 break;
3796
3797 case MFI_STATE_FW_INIT:
3798 max_wait = MEGASAS_RESET_WAIT_TIME;
3799 cur_state = MFI_STATE_FW_INIT;
3800 break;
3801
3802 case MFI_STATE_FW_INIT_2:
3803 max_wait = MEGASAS_RESET_WAIT_TIME;
3804 cur_state = MFI_STATE_FW_INIT_2;
3805 break;
3806
3807 case MFI_STATE_DEVICE_SCAN:
3808 max_wait = MEGASAS_RESET_WAIT_TIME;
3809 cur_state = MFI_STATE_DEVICE_SCAN;
3810 break;
3811
3812 case MFI_STATE_FLUSH_CACHE:
3813 max_wait = MEGASAS_RESET_WAIT_TIME;
3814 cur_state = MFI_STATE_FLUSH_CACHE;
3815 break;
3816
3817 default:
3818 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3819 fw_state);
3820 return -ENODEV;
3821 }
3822
3823 /*
3824 * The cur_state should not last for more than max_wait secs
3825 */
3826 for (i = 0; i < max_wait * 50; i++) {
3827 curr_abs_state = instance->instancet->
3828 read_fw_status_reg(instance->reg_set);
3829
3830 if (abs_state == curr_abs_state) {
3831 msleep(20);
3832 } else
3833 break;
3834 }
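/*
 * Polling arithmetic (derived from the loop above): max_wait * 50
 * iterations of msleep(20) give roughly max_wait seconds of waiting
 * (50 * 20 ms = 1 s per wait unit) before giving up on this state.
 */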
3835
3836 /*
3837 * Return error if fw_state hasn't changed after max_wait
3838 */
3839 if (curr_abs_state == abs_state) {
3840 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3841 "in %d secs\n", fw_state, max_wait);
3842 return -ENODEV;
3843 }
3844
3845 abs_state = curr_abs_state;
3846 fw_state = curr_abs_state & MFI_STATE_MASK;
3847 }
3848 dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3849
3850 return 0;
3851 }
3852
3853 /**
3854 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
3855 * @instance: Adapter soft state
3856 */
3857 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3858 {
3859 int i;
3860 u16 max_cmd = instance->max_mfi_cmds;
3861 struct megasas_cmd *cmd;
3862
3863 if (!instance->frame_dma_pool)
3864 return;
3865
3866 /*
3867 * Return all frames to pool
3868 */
3869 for (i = 0; i < max_cmd; i++) {
3870
3871 cmd = instance->cmd_list[i];
3872
3873 if (cmd->frame)
3874 dma_pool_free(instance->frame_dma_pool, cmd->frame,
3875 cmd->frame_phys_addr);
3876
3877 if (cmd->sense)
3878 dma_pool_free(instance->sense_dma_pool, cmd->sense,
3879 cmd->sense_phys_addr);
3880 }
3881
3882 /*
3883 * Now destroy the pool itself
3884 */
3885 dma_pool_destroy(instance->frame_dma_pool);
3886 dma_pool_destroy(instance->sense_dma_pool);
3887
3888 instance->frame_dma_pool = NULL;
3889 instance->sense_dma_pool = NULL;
3890 }
3891
3892 /**
3893 * megasas_create_frame_pool - Creates DMA pool for cmd frames
3894 * @instance: Adapter soft state
3895 *
3896 * Each command packet has an embedded DMA memory buffer that is used for
3897 * filling MFI frame and the SG list that immediately follows the frame. This
3898 * function creates those DMA memory buffers for each command packet by using
3899 * the DMA pool facility.
3900 */
3901 static int megasas_create_frame_pool(struct megasas_instance *instance)
3902 {
3903 int i;
3904 u16 max_cmd;
3905 u32 sge_sz;
3906 u32 frame_count;
3907 struct megasas_cmd *cmd;
3908
3909 max_cmd = instance->max_mfi_cmds;
3910
3911 /*
3912 * Size of our frame is 64 bytes for MFI frame, followed by max SG
3913 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
3914 */
3915 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3916 sizeof(struct megasas_sge32);
3917
3918 if (instance->flag_ieee)
3919 sge_sz = sizeof(struct megasas_sge_skinny);
3920
3921 /*
3922 * For MFI controllers.
3923 * max_num_sge = 60
3924 * max_sge_sz = 16 byte (sizeof megasas_sge_skinny)
3925 * Total 960 byte (15 MFI frame of 64 byte)
3926 *
3927 * Fusion adapter require only 3 extra frame.
3928 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
3929 * max_sge_sz = 12 byte (sizeof megasas_sge64)
3930 * Total 192 byte (3 MFI frame of 64 byte)
3931 */
3932 frame_count = (instance->adapter_type == MFI_SERIES) ?
3933 (15 + 1) : (3 + 1);
3934 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
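/*
 * Worked example from the sizes above (assuming MEGAMFI_FRAME_SIZE is
 * the 64-byte MFI frame size mentioned in the comment): MFI-series
 * adapters get frame_count = 16, so mfi_frame_size = 64 * 16 = 1024
 * bytes per command; Fusion adapters get frame_count = 4, i.e.
 * 64 * 4 = 256 bytes per command.
 */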
3935 /*
3936 * Use DMA pool facility provided by PCI layer
3937 */
3938 instance->frame_dma_pool = dma_pool_create("megasas frame pool",
3939 &instance->pdev->dev,
3940 instance->mfi_frame_size, 256, 0);
3941
3942 if (!instance->frame_dma_pool) {
3943 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
3944 return -ENOMEM;
3945 }
3946
3947 instance->sense_dma_pool = dma_pool_create("megasas sense pool",
3948 &instance->pdev->dev, 128,
3949 4, 0);
3950
3951 if (!instance->sense_dma_pool) {
3952 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
3953
3954 dma_pool_destroy(instance->frame_dma_pool);
3955 instance->frame_dma_pool = NULL;
3956
3957 return -ENOMEM;
3958 }
3959
3960 /*
3961 * Allocate and attach a frame to each of the commands in cmd_list.
3962 * By making cmd->index as the context instead of the &cmd, we can
3963 * always use 32bit context regardless of the architecture
3964 */
3965 for (i = 0; i < max_cmd; i++) {
3966
3967 cmd = instance->cmd_list[i];
3968
3969 cmd->frame = dma_pool_alloc(instance->frame_dma_pool,
3970 GFP_KERNEL, &cmd->frame_phys_addr);
3971
3972 cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
3973 GFP_KERNEL, &cmd->sense_phys_addr);
3974
3975 /*
3976 * megasas_teardown_frame_pool() takes care of freeing
3977 * whatever has been allocated
3978 */
3979 if (!cmd->frame || !cmd->sense) {
3980 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
3981 megasas_teardown_frame_pool(instance);
3982 return -ENOMEM;
3983 }
3984
3985 memset(cmd->frame, 0, instance->mfi_frame_size);
3986 cmd->frame->io.context = cpu_to_le32(cmd->index);
3987 cmd->frame->io.pad_0 = 0;
3988 if ((instance->adapter_type == MFI_SERIES) && reset_devices)
3989 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
3990 }
3991
3992 return 0;
3993 }
3994
3995 /**
3996 * megasas_free_cmds - Free all the cmds in the free cmd pool
3997 * @instance: Adapter soft state
3998 */
3999 void megasas_free_cmds(struct megasas_instance *instance)
4000 {
4001 int i;
4002
4003 /* First free the MFI frame pool */
4004 megasas_teardown_frame_pool(instance);
4005
4006 /* Free all the commands in the cmd_list */
4007 for (i = 0; i < instance->max_mfi_cmds; i++)
4008
4009 kfree(instance->cmd_list[i]);
4010
4011 /* Free the cmd_list buffer itself */
4012 kfree(instance->cmd_list);
4013 instance->cmd_list = NULL;
4014
4015 INIT_LIST_HEAD(&instance->cmd_pool);
4016 }
4017
4018 /**
4019 * megasas_alloc_cmds - Allocates the command packets
4020 * @instance: Adapter soft state
4021 *
4022 * Each command that is issued to the FW, whether IO commands from the OS or
4023 * internal commands like IOCTLs, are wrapped in local data structure called
4024 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4025 * the FW.
4026 *
4027 * Each frame has a 32-bit field called context (tag). This context is used
4028 * to get back the megasas_cmd from the frame when a frame gets completed in
4029 * the ISR. Typically the address of the megasas_cmd itself would be used as
4030 * the context. But we wanted to keep the differences between 32 and 64 bit
4031 * systems to the minimum. We always use 32 bit integers for the context. In
4032 * this driver, the 32 bit values are the indices into an array cmd_list, used
4033 * to look up the megasas_cmd given the context (see the lookup sketch after
4034 * this function). The free commands themselves are maintained in a linked list called cmd_pool.
4035 */
4036 int megasas_alloc_cmds(struct megasas_instance *instance)
4037 {
4038 int i;
4039 int j;
4040 u16 max_cmd;
4041 struct megasas_cmd *cmd;
4042 struct fusion_context *fusion;
4043
4044 fusion = instance->ctrl_context;
4045 max_cmd = instance->max_mfi_cmds;
4046
4047 /*
4048 * instance->cmd_list is an array of struct megasas_cmd pointers.
4049 * Allocate the dynamic array first and then allocate individual
4050 * commands.
4051 */
4052 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
4053
4054 if (!instance->cmd_list) {
4055 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4056 return -ENOMEM;
4057 }
4058
4059 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
4060
4061 for (i = 0; i < max_cmd; i++) {
4062 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4063 GFP_KERNEL);
4064
4065 if (!instance->cmd_list[i]) {
4066
4067 for (j = 0; j < i; j++)
4068 kfree(instance->cmd_list[j]);
4069
4070 kfree(instance->cmd_list);
4071 instance->cmd_list = NULL;
4072
4073 return -ENOMEM;
4074 }
4075 }
4076
4077 for (i = 0; i < max_cmd; i++) {
4078 cmd = instance->cmd_list[i];
4079 memset(cmd, 0, sizeof(struct megasas_cmd));
4080 cmd->index = i;
4081 cmd->scmd = NULL;
4082 cmd->instance = instance;
4083
4084 list_add_tail(&cmd->list, &instance->cmd_pool);
4085 }
4086
4087 /*
4088 * Create a frame pool and assign one frame to each cmd
4089 */
4090 if (megasas_create_frame_pool(instance)) {
4091 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4092 megasas_free_cmds(instance);
4093 return -ENOMEM;
4094 }
4095
4096 return 0;
4097 }
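/*
 * Illustrative sketch (not part of the driver): how a completed frame's
 * 32-bit context is translated back into its megasas_cmd, as described
 * in the comment above megasas_alloc_cmds(). The helper name below is
 * hypothetical; the real completion paths perform the equivalent
 * array lookup.
 */
#if 0
static struct megasas_cmd *
megasas_cmd_from_context_example(struct megasas_instance *instance,
				 u32 context)
{
	/* The context written into the frame is simply the cmd_list index */
	if (context >= instance->max_mfi_cmds)
		return NULL;

	return instance->cmd_list[context];
}
#endif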
4098
4099 /*
4100 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
4101 * @instance: Adapter soft state
4102 *
4103 * Returns INITIATE_OCR for Fusion adapters when driver unload is not in progress
4104 * and OCR is not already underway; MFI adapters get KILL_ADAPTER, otherwise IGNORE_TIMEOUT.
4105 */
4106 inline int
4107 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4108
4109 if (instance->adapter_type == MFI_SERIES)
4110 return KILL_ADAPTER;
4111 else if (instance->unload ||
4112 test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
4113 &instance->reset_flags))
4114 return IGNORE_TIMEOUT;
4115 else
4116 return INITIATE_OCR;
4117 }
4118
4119 static void
4120 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4121 {
4122 int ret;
4123 struct megasas_cmd *cmd;
4124 struct megasas_dcmd_frame *dcmd;
4125
4126 struct MR_PRIV_DEVICE *mr_device_priv_data;
4127 u16 device_id = 0;
4128
4129 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4130 cmd = megasas_get_cmd(instance);
4131
4132 if (!cmd) {
4133 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4134 return;
4135 }
4136
4137 dcmd = &cmd->frame->dcmd;
4138
4139 memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4140 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4141
4142 dcmd->mbox.s[0] = cpu_to_le16(device_id);
4143 dcmd->cmd = MFI_CMD_DCMD;
4144 dcmd->cmd_status = 0xFF;
4145 dcmd->sge_count = 1;
4146 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4147 dcmd->timeout = 0;
4148 dcmd->pad_0 = 0;
4149 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4150 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4151 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
4152 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
4153
4154 if ((instance->adapter_type != MFI_SERIES) &&
4155 !instance->mask_interrupts)
4156 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4157 else
4158 ret = megasas_issue_polled(instance, cmd);
4159
4160 switch (ret) {
4161 case DCMD_SUCCESS:
4162 mr_device_priv_data = sdev->hostdata;
4163 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4164 mr_device_priv_data->interface_type =
4165 instance->pd_info->state.ddf.pdType.intf;
4166 break;
4167
4168 case DCMD_TIMEOUT:
4169
4170 switch (dcmd_timeout_ocr_possible(instance)) {
4171 case INITIATE_OCR:
4172 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4173 megasas_reset_fusion(instance->host,
4174 MFI_IO_TIMEOUT_OCR);
4175 break;
4176 case KILL_ADAPTER:
4177 megaraid_sas_kill_hba(instance);
4178 break;
4179 case IGNORE_TIMEOUT:
4180 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4181 __func__, __LINE__);
4182 break;
4183 }
4184
4185 break;
4186 }
4187
4188 if (ret != DCMD_TIMEOUT)
4189 megasas_return_cmd(instance, cmd);
4190
4191 return;
4192 }
4193 /*
4194 * megasas_get_pd_list - Returns FW's pd_list structure
4195 * @instance: Adapter soft state
4196 *
4197 * Issues an internal command (DCMD) to get the FW's controller PD
4198 * list structure. This information is mainly used to find out the
4199 * system PDs (JBOD drives) that the FW exposes to the host and their
4200 * device IDs.
4201 */
4202 static int
4203 megasas_get_pd_list(struct megasas_instance *instance)
4204 {
4205 int ret = 0, pd_index = 0;
4206 struct megasas_cmd *cmd;
4207 struct megasas_dcmd_frame *dcmd;
4208 struct MR_PD_LIST *ci;
4209 struct MR_PD_ADDRESS *pd_addr;
4210 dma_addr_t ci_h = 0;
4211
4212 if (instance->pd_list_not_supported) {
4213 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4214 "not supported by firmware\n");
4215 return ret;
4216 }
4217
4218 cmd = megasas_get_cmd(instance);
4219
4220 if (!cmd) {
4221 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4222 return -ENOMEM;
4223 }
4224
4225 dcmd = &cmd->frame->dcmd;
4226
4227 ci = pci_alloc_consistent(instance->pdev,
4228 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
4229
4230 if (!ci) {
4231 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
4232 megasas_return_cmd(instance, cmd);
4233 return -ENOMEM;
4234 }
4235
4236 memset(ci, 0, sizeof(*ci));
4237 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4238
4239 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4240 dcmd->mbox.b[1] = 0;
4241 dcmd->cmd = MFI_CMD_DCMD;
4242 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4243 dcmd->sge_count = 1;
4244 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4245 dcmd->timeout = 0;
4246 dcmd->pad_0 = 0;
4247 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4248 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4249 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4250 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4251
4252 if ((instance->adapter_type != MFI_SERIES) &&
4253 !instance->mask_interrupts)
4254 ret = megasas_issue_blocked_cmd(instance, cmd,
4255 MFI_IO_TIMEOUT_SECS);
4256 else
4257 ret = megasas_issue_polled(instance, cmd);
4258
4259 switch (ret) {
4260 case DCMD_FAILED:
4261 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4262 "failed/not supported by firmware\n");
4263
4264 if (instance->adapter_type != MFI_SERIES)
4265 megaraid_sas_kill_hba(instance);
4266 else
4267 instance->pd_list_not_supported = 1;
4268 break;
4269 case DCMD_TIMEOUT:
4270
4271 switch (dcmd_timeout_ocr_possible(instance)) {
4272 case INITIATE_OCR:
4273 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4274 /*
4275 * DCMD failed from AEN path.
4276 * AEN path already holds reset_mutex to avoid PCI access
4277 * while OCR is in progress.
4278 */
4279 mutex_unlock(&instance->reset_mutex);
4280 megasas_reset_fusion(instance->host,
4281 MFI_IO_TIMEOUT_OCR);
4282 mutex_lock(&instance->reset_mutex);
4283 break;
4284 case KILL_ADAPTER:
4285 megaraid_sas_kill_hba(instance);
4286 break;
4287 case IGNORE_TIMEOUT:
4288 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4289 __func__, __LINE__);
4290 break;
4291 }
4292
4293 break;
4294
4295 case DCMD_SUCCESS:
4296 pd_addr = ci->addr;
4297
4298 if ((le32_to_cpu(ci->count) >
4299 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4300 break;
4301
4302 memset(instance->local_pd_list, 0,
4303 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4304
4305 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4306 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
4307 le16_to_cpu(pd_addr->deviceId);
4308 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
4309 pd_addr->scsiDevType;
4310 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
4311 MR_PD_STATE_SYSTEM;
4312 pd_addr++;
4313 }
4314
4315 memcpy(instance->pd_list, instance->local_pd_list,
4316 sizeof(instance->pd_list));
4317 break;
4318
4319 }
4320
4321 pci_free_consistent(instance->pdev,
4322 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
4323 ci, ci_h);
4324
4325 if (ret != DCMD_TIMEOUT)
4326 megasas_return_cmd(instance, cmd);
4327
4328 return ret;
4329 }
4330
4331 /*
4332 * megasas_get_ld_list - Returns FW's ld_list structure
4333 * @instance: Adapter soft state
4334 *
4335 * Issues an internal command (DCMD) to get the FW's controller LD
4336 * list structure. This information is mainly used to find out the
4337 * logical drives (LDs) configured on the controller and their
4338 * target IDs.
4339 */
4340 static int
4341 megasas_get_ld_list(struct megasas_instance *instance)
4342 {
4343 int ret = 0, ld_index = 0, ids = 0;
4344 struct megasas_cmd *cmd;
4345 struct megasas_dcmd_frame *dcmd;
4346 struct MR_LD_LIST *ci;
4347 dma_addr_t ci_h = 0;
4348 u32 ld_count;
4349
4350 cmd = megasas_get_cmd(instance);
4351
4352 if (!cmd) {
4353 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4354 return -ENOMEM;
4355 }
4356
4357 dcmd = &cmd->frame->dcmd;
4358
4359 ci = pci_alloc_consistent(instance->pdev,
4360 sizeof(struct MR_LD_LIST),
4361 &ci_h);
4362
4363 if (!ci) {
4364 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
4365 megasas_return_cmd(instance, cmd);
4366 return -ENOMEM;
4367 }
4368
4369 memset(ci, 0, sizeof(*ci));
4370 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4371
4372 if (instance->supportmax256vd)
4373 dcmd->mbox.b[0] = 1;
4374 dcmd->cmd = MFI_CMD_DCMD;
4375 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4376 dcmd->sge_count = 1;
4377 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4378 dcmd->timeout = 0;
4379 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4380 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4381 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4382 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
4383 dcmd->pad_0 = 0;
4384
4385 if ((instance->adapter_type != MFI_SERIES) &&
4386 !instance->mask_interrupts)
4387 ret = megasas_issue_blocked_cmd(instance, cmd,
4388 MFI_IO_TIMEOUT_SECS);
4389 else
4390 ret = megasas_issue_polled(instance, cmd);
4391
4392 ld_count = le32_to_cpu(ci->ldCount);
4393
4394 switch (ret) {
4395 case DCMD_FAILED:
4396 megaraid_sas_kill_hba(instance);
4397 break;
4398 case DCMD_TIMEOUT:
4399
4400 switch (dcmd_timeout_ocr_possible(instance)) {
4401 case INITIATE_OCR:
4402 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4403 /*
4404 * DCMD failed from AEN path.
4405 * AEN path already holds reset_mutex to avoid PCI access
4406 * while OCR is in progress.
4407 */
4408 mutex_unlock(&instance->reset_mutex);
4409 megasas_reset_fusion(instance->host,
4410 MFI_IO_TIMEOUT_OCR);
4411 mutex_lock(&instance->reset_mutex);
4412 break;
4413 case KILL_ADAPTER:
4414 megaraid_sas_kill_hba(instance);
4415 break;
4416 case IGNORE_TIMEOUT:
4417 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4418 __func__, __LINE__);
4419 break;
4420 }
4421
4422 break;
4423
4424 case DCMD_SUCCESS:
4425 if (ld_count > instance->fw_supported_vd_count)
4426 break;
4427
4428 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4429
4430 for (ld_index = 0; ld_index < ld_count; ld_index++) {
4431 if (ci->ldList[ld_index].state != 0) {
4432 ids = ci->ldList[ld_index].ref.targetId;
4433 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4434 }
4435 }
4436
4437 break;
4438 }
4439
4440 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
4441
4442 if (ret != DCMD_TIMEOUT)
4443 megasas_return_cmd(instance, cmd);
4444
4445 return ret;
4446 }
4447
4448 /**
4449 * megasas_ld_list_query - Returns FW's ld_list structure
4450 * @instance: Adapter soft state
4451 * @query_type: LD query type (e.g. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)
4452 *
4453 * Issues an internal command (DCMD) to query the FW's LD target ID
4454 * list. This information is mainly used to find out the logical
4455 * drives supported by the FW and their target IDs.
4456 */
4457 static int
4458 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4459 {
4460 int ret = 0, ld_index = 0, ids = 0;
4461 struct megasas_cmd *cmd;
4462 struct megasas_dcmd_frame *dcmd;
4463 struct MR_LD_TARGETID_LIST *ci;
4464 dma_addr_t ci_h = 0;
4465 u32 tgtid_count;
4466
4467 cmd = megasas_get_cmd(instance);
4468
4469 if (!cmd) {
4470 dev_warn(&instance->pdev->dev,
4471 "megasas_ld_list_query: Failed to get cmd\n");
4472 return -ENOMEM;
4473 }
4474
4475 dcmd = &cmd->frame->dcmd;
4476
4477 ci = pci_alloc_consistent(instance->pdev,
4478 sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
4479
4480 if (!ci) {
4481 dev_warn(&instance->pdev->dev,
4482 "Failed to alloc mem for ld_list_query\n");
4483 megasas_return_cmd(instance, cmd);
4484 return -ENOMEM;
4485 }
4486
4487 memset(ci, 0, sizeof(*ci));
4488 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4489
4490 dcmd->mbox.b[0] = query_type;
4491 if (instance->supportmax256vd)
4492 dcmd->mbox.b[2] = 1;
4493
4494 dcmd->cmd = MFI_CMD_DCMD;
4495 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4496 dcmd->sge_count = 1;
4497 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4498 dcmd->timeout = 0;
4499 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4500 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4501 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4502 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4503 dcmd->pad_0 = 0;
4504
4505 if ((instance->adapter_type != MFI_SERIES) &&
4506 !instance->mask_interrupts)
4507 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4508 else
4509 ret = megasas_issue_polled(instance, cmd);
4510
4511 switch (ret) {
4512 case DCMD_FAILED:
4513 dev_info(&instance->pdev->dev,
4514 "DCMD not supported by firmware - %s %d\n",
4515 __func__, __LINE__);
4516 ret = megasas_get_ld_list(instance);
4517 break;
4518 case DCMD_TIMEOUT:
4519 switch (dcmd_timeout_ocr_possible(instance)) {
4520 case INITIATE_OCR:
4521 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4522 /*
4523 * DCMD failed from AEN path.
4524 * AEN path already holds reset_mutex to avoid PCI access
4525 * while OCR is in progress.
4526 */
4527 mutex_unlock(&instance->reset_mutex);
4528 megasas_reset_fusion(instance->host,
4529 MFI_IO_TIMEOUT_OCR);
4530 mutex_lock(&instance->reset_mutex);
4531 break;
4532 case KILL_ADAPTER:
4533 megaraid_sas_kill_hba(instance);
4534 break;
4535 case IGNORE_TIMEOUT:
4536 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4537 __func__, __LINE__);
4538 break;
4539 }
4540
4541 break;
4542 case DCMD_SUCCESS:
4543 tgtid_count = le32_to_cpu(ci->count);
4544
4545 if ((tgtid_count > (instance->fw_supported_vd_count)))
4546 break;
4547
4548 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4549 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4550 ids = ci->targetId[ld_index];
4551 instance->ld_ids[ids] = ci->targetId[ld_index];
4552 }
4553
4554 break;
4555 }
4556
4557 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
4558 ci, ci_h);
4559
4560 if (ret != DCMD_TIMEOUT)
4561 megasas_return_cmd(instance, cmd);
4562
4563 return ret;
4564 }
4565
4566 /*
4567 * megasas_update_ext_vd_details - Update details w.r.t. Extended VDs
4568 * @instance: Adapter soft state
4569 */
4570 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4571 {
4572 struct fusion_context *fusion;
4573 u32 ventura_map_sz = 0;
4574
4575 fusion = instance->ctrl_context;
4576 /* For MFI based controllers return dummy success */
4577 if (!fusion)
4578 return;
4579
4580 instance->supportmax256vd =
4581 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
4582 /* Below is additional check to address future FW enhancement */
4583 if (instance->ctrl_info->max_lds > 64)
4584 instance->supportmax256vd = 1;
4585
4586 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4587 * MEGASAS_MAX_DEV_PER_CHANNEL;
4588 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4589 * MEGASAS_MAX_DEV_PER_CHANNEL;
4590 if (instance->supportmax256vd) {
4591 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4592 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4593 } else {
4594 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4595 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4596 }
4597
4598 dev_info(&instance->pdev->dev,
4599 "firmware type\t: %s\n",
4600 instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
4601 "Legacy(64 VD) firmware");
4602
4603 if (instance->max_raid_mapsize) {
4604 ventura_map_sz = instance->max_raid_mapsize *
4605 MR_MIN_MAP_SIZE; /* 64k */
4606 fusion->current_map_sz = ventura_map_sz;
4607 fusion->max_map_sz = ventura_map_sz;
4608 } else {
4609 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4610 (sizeof(struct MR_LD_SPAN_MAP) *
4611 (instance->fw_supported_vd_count - 1));
4612 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
4613
4614 fusion->max_map_sz =
4615 max(fusion->old_map_sz, fusion->new_map_sz);
4616
4617 if (instance->supportmax256vd)
4618 fusion->current_map_sz = fusion->new_map_sz;
4619 else
4620 fusion->current_map_sz = fusion->old_map_sz;
4621 }
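/*
 * Illustrative numbers (assumed, not from the source): a Ventura
 * controller reporting max_raid_mapsize = 4 gets current_map_sz =
 * max_map_sz = 4 * MR_MIN_MAP_SIZE = 4 * 64 KB = 256 KB, while older
 * Fusion controllers use the larger of old_map_sz and new_map_sz
 * computed above from the supported VD count.
 */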
4622 /* irrespective of FW raid maps, driver raid map is constant */
4623 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4624 }
4625
4626 /**
4627 * megasas_get_ctrl_info - Returns FW's controller structure
4628 * @instance: Adapter soft state
4629 *
4630 * Issues an internal command (DCMD) to get the FW's controller structure.
4631 * This information is mainly used to find out the maximum IO transfer per
4632 * command supported by the FW.
4633 */
4634 int
4635 megasas_get_ctrl_info(struct megasas_instance *instance)
4636 {
4637 int ret = 0;
4638 struct megasas_cmd *cmd;
4639 struct megasas_dcmd_frame *dcmd;
4640 struct megasas_ctrl_info *ci;
4641 struct megasas_ctrl_info *ctrl_info;
4642 dma_addr_t ci_h = 0;
4643
4644 ctrl_info = instance->ctrl_info;
4645
4646 cmd = megasas_get_cmd(instance);
4647
4648 if (!cmd) {
4649 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4650 return -ENOMEM;
4651 }
4652
4653 dcmd = &cmd->frame->dcmd;
4654
4655 ci = pci_alloc_consistent(instance->pdev,
4656 sizeof(struct megasas_ctrl_info), &ci_h);
4657
4658 if (!ci) {
4659 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
4660 megasas_return_cmd(instance, cmd);
4661 return -ENOMEM;
4662 }
4663
4664 memset(ci, 0, sizeof(*ci));
4665 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4666
4667 dcmd->cmd = MFI_CMD_DCMD;
4668 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4669 dcmd->sge_count = 1;
4670 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4671 dcmd->timeout = 0;
4672 dcmd->pad_0 = 0;
4673 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4674 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4675 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4676 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4677 dcmd->mbox.b[0] = 1;
4678
4679 if ((instance->adapter_type != MFI_SERIES) &&
4680 !instance->mask_interrupts)
4681 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4682 else
4683 ret = megasas_issue_polled(instance, cmd);
4684
4685 switch (ret) {
4686 case DCMD_SUCCESS:
4687 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
4688 /* Save required controller information in
4689 * CPU endianness format.
4690 */
4691 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4692 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4693 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4694 le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
4695
4696 /* Update the latest Ext VD info.
4697 * From Init path, store current firmware details.
4698 * From OCR path, detect any firmware property changes
4699 * in case of a firmware upgrade without a system reboot.
4700 */
4701 megasas_update_ext_vd_details(instance);
4702 instance->use_seqnum_jbod_fp =
4703 ctrl_info->adapterOperations3.useSeqNumJbodFP;
4704 instance->support_morethan256jbod =
4705 ctrl_info->adapter_operations4.support_pd_map_target_id;
4706
4707 /*Check whether controller is iMR or MR */
4708 instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
4709 dev_info(&instance->pdev->dev,
4710 "controller type\t: %s(%dMB)\n",
4711 instance->is_imr ? "iMR" : "MR",
4712 le16_to_cpu(ctrl_info->memory_size));
4713
4714 instance->disableOnlineCtrlReset =
4715 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4716 instance->secure_jbod_support =
4717 ctrl_info->adapterOperations3.supportSecurityonJBOD;
4718 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4719 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4720 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4721 instance->secure_jbod_support ? "Yes" : "No");
4722 break;
4723
4724 case DCMD_TIMEOUT:
4725 switch (dcmd_timeout_ocr_possible(instance)) {
4726 case INITIATE_OCR:
4727 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4728 megasas_reset_fusion(instance->host,
4729 MFI_IO_TIMEOUT_OCR);
4730 break;
4731 case KILL_ADAPTER:
4732 megaraid_sas_kill_hba(instance);
4733 break;
4734 case IGNORE_TIMEOUT:
4735 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4736 __func__, __LINE__);
4737 break;
4738 }
4739 case DCMD_FAILED:
4740 megaraid_sas_kill_hba(instance);
4741 break;
4742
4743 }
4744
4745 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4746 ci, ci_h);
4747
4748 megasas_return_cmd(instance, cmd);
4749
4750
4751 return ret;
4752 }
4753
4754 /*
4755 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
4756 * to firmware
4757 *
4758 * @instance: Adapter soft state
4759 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature
4760 * MR_CRASH_BUF_TURN_OFF = 0
4761 * MR_CRASH_BUF_TURN_ON = 1
4762 * Returns 0 on success, non-zero on failure.
4763 * Issues an internal command (DCMD) to set parameters for crash dump feature.
4764 * Driver will send address of crash dump DMA buffer and set mbox to tell FW
4765 * that driver supports crash dump feature. This DCMD will be sent only if
4766 * crash dump feature is supported by the FW.
4767 *
4768 */
4769 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4770 u8 crash_buf_state)
4771 {
4772 int ret = 0;
4773 struct megasas_cmd *cmd;
4774 struct megasas_dcmd_frame *dcmd;
4775
4776 cmd = megasas_get_cmd(instance);
4777
4778 if (!cmd) {
4779 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4780 return -ENOMEM;
4781 }
4782
4783
4784 dcmd = &cmd->frame->dcmd;
4785
4786 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4787 dcmd->mbox.b[0] = crash_buf_state;
4788 dcmd->cmd = MFI_CMD_DCMD;
4789 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4790 dcmd->sge_count = 1;
4791 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4792 dcmd->timeout = 0;
4793 dcmd->pad_0 = 0;
4794 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4795 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4796 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
4797 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4798
4799 if ((instance->adapter_type != MFI_SERIES) &&
4800 !instance->mask_interrupts)
4801 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4802 else
4803 ret = megasas_issue_polled(instance, cmd);
4804
4805 if (ret == DCMD_TIMEOUT) {
4806 switch (dcmd_timeout_ocr_possible(instance)) {
4807 case INITIATE_OCR:
4808 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4809 megasas_reset_fusion(instance->host,
4810 MFI_IO_TIMEOUT_OCR);
4811 break;
4812 case KILL_ADAPTER:
4813 megaraid_sas_kill_hba(instance);
4814 break;
4815 case IGNORE_TIMEOUT:
4816 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4817 __func__, __LINE__);
4818 break;
4819 }
4820 } else
4821 megasas_return_cmd(instance, cmd);
4822
4823 return ret;
4824 }
4825
4826 /**
4827 * megasas_issue_init_mfi - Initializes the FW
4828 * @instance: Adapter soft state
4829 *
4830 * Issues the INIT MFI cmd
4831 */
4832 static int
4833 megasas_issue_init_mfi(struct megasas_instance *instance)
4834 {
4835 __le32 context;
4836 struct megasas_cmd *cmd;
4837 struct megasas_init_frame *init_frame;
4838 struct megasas_init_queue_info *initq_info;
4839 dma_addr_t init_frame_h;
4840 dma_addr_t initq_info_h;
4841
4842 /*
4843 * Prepare an init frame. Note the init frame points to queue info
4844 * structure. Each frame has SGL allocated after first 64 bytes. For
4845 * this frame - since we don't need any SGL - we use SGL's space as
4846 * queue info structure
4847 *
4848 * We will not get a NULL command below. We just created the pool.
4849 */
4850 cmd = megasas_get_cmd(instance);
4851
4852 init_frame = (struct megasas_init_frame *)cmd->frame;
4853 initq_info = (struct megasas_init_queue_info *)
4854 ((unsigned long)init_frame + 64);
4855
4856 init_frame_h = cmd->frame_phys_addr;
4857 initq_info_h = init_frame_h + 64;
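/*
 * Resulting DMA layout within this single command frame (as described
 * in the comment above):
 *
 *   frame_phys_addr + 0  : struct megasas_init_frame (first 64 bytes)
 *   frame_phys_addr + 64 : struct megasas_init_queue_info (reuses the
 *                          frame's SGL space)
 */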
4858
4859 context = init_frame->context;
4860 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4861 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4862 init_frame->context = context;
4863
4864 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4865 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4866
4867 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4868 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4869
4870 init_frame->cmd = MFI_CMD_INIT;
4871 init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4872 init_frame->queue_info_new_phys_addr_lo =
4873 cpu_to_le32(lower_32_bits(initq_info_h));
4874 init_frame->queue_info_new_phys_addr_hi =
4875 cpu_to_le32(upper_32_bits(initq_info_h));
4876
4877 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4878
4879 /*
4880 * disable the intr before firing the init frame to FW
4881 */
4882 instance->instancet->disable_intr(instance);
4883
4884 /*
4885 * Issue the init frame in polled mode
4886 */
4887
4888 if (megasas_issue_polled(instance, cmd)) {
4889 dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4890 megasas_return_cmd(instance, cmd);
4891 goto fail_fw_init;
4892 }
4893
4894 megasas_return_cmd(instance, cmd);
4895
4896 return 0;
4897
4898 fail_fw_init:
4899 return -EINVAL;
4900 }
4901
4902 static u32
4903 megasas_init_adapter_mfi(struct megasas_instance *instance)
4904 {
4905 struct megasas_register_set __iomem *reg_set;
4906 u32 context_sz;
4907 u32 reply_q_sz;
4908
4909 reg_set = instance->reg_set;
4910
4911 /*
4912 * Get various operational parameters from status register
4913 */
4914 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
4915 /*
4916 * Reduce the max supported cmds by 1. This is to ensure that the
4917 * reply_q_sz (1 more than the max cmd that driver may send)
4918 * does not exceed max cmds that the FW can support
4919 */
4920 instance->max_fw_cmds = instance->max_fw_cmds-1;
4921 instance->max_mfi_cmds = instance->max_fw_cmds;
4922 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4923 0x10;
4924 /*
4925 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4926 * are reserved for IOCTL + driver's internal DCMDs.
4927 */
4928 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4929 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4930 instance->max_scsi_cmds = (instance->max_fw_cmds -
4931 MEGASAS_SKINNY_INT_CMDS);
4932 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4933 } else {
4934 instance->max_scsi_cmds = (instance->max_fw_cmds -
4935 MEGASAS_INT_CMDS);
4936 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4937 }
4938
4939 instance->cur_can_queue = instance->max_scsi_cmds;
4940 /*
4941 * Create a pool of commands
4942 */
4943 if (megasas_alloc_cmds(instance))
4944 goto fail_alloc_cmds;
4945
4946 /*
4947 * Allocate memory for reply queue. Length of reply queue should
4948 * be _one_ more than the maximum commands handled by the firmware.
4949 *
4950 * Note: When FW completes commands, it places the corresponding context
4951 * values in this circular reply queue. This circular queue is a fairly
4952 * typical producer-consumer queue. FW is the producer (of completed
4953 * commands) and the driver is the consumer (see the sketch after this function).
4954 */
4955 context_sz = sizeof(u32);
4956 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
4957
4958 instance->reply_queue = pci_alloc_consistent(instance->pdev,
4959 reply_q_sz,
4960 &instance->reply_queue_h);
4961
4962 if (!instance->reply_queue) {
4963 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
4964 goto fail_reply_queue;
4965 }
4966
4967 if (megasas_issue_init_mfi(instance))
4968 goto fail_fw_init;
4969
4970 if (megasas_get_ctrl_info(instance)) {
4971 dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
4972 "Fail from %s %d\n", instance->unique_id,
4973 __func__, __LINE__);
4974 goto fail_fw_init;
4975 }
4976
4977 instance->fw_support_ieee = 0;
4978 instance->fw_support_ieee =
4979 (instance->instancet->read_fw_status_reg(reg_set) &
4980 0x04000000);
4981
4982 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
4983 instance->fw_support_ieee);
4984
4985 if (instance->fw_support_ieee)
4986 instance->flag_ieee = 1;
4987
4988 return 0;
4989
4990 fail_fw_init:
4991
4992 pci_free_consistent(instance->pdev, reply_q_sz,
4993 instance->reply_queue, instance->reply_queue_h);
4994 fail_reply_queue:
4995 megasas_free_cmds(instance);
4996
4997 fail_alloc_cmds:
4998 return 1;
4999 }
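/*
 * Illustrative sketch (not part of the driver): how the circular reply
 * queue described above is typically consumed. The producer index is
 * advanced by the FW, the consumer index by the driver; each entry
 * holds the 32-bit context of a completed command. The names below
 * mirror fields used in this file, but the loop itself is only an
 * example, not the driver's actual completion path.
 */
#if 0
static void megasas_drain_reply_queue_example(struct megasas_instance *instance)
{
	u32 producer = le32_to_cpu(*instance->producer);
	u32 consumer = le32_to_cpu(*instance->consumer);

	while (consumer != producer) {
		u32 context = le32_to_cpu(instance->reply_queue[consumer]);
		struct megasas_cmd *cmd = instance->cmd_list[context];

		/* Hand the completed command back to the common path */
		megasas_complete_cmd(instance, cmd, DID_OK);

		/* Queue has max_fw_cmds + 1 entries, wrap accordingly */
		if (++consumer == instance->max_fw_cmds + 1)
			consumer = 0;
	}

	*instance->consumer = cpu_to_le32(producer);
}
#endif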
5000
5001 /*
5002 * megasas_setup_irqs_ioapic - register legacy interrupts.
5003 * @instance: Adapter soft state
5004 *
5005 * Do not enable interrupt, only setup ISRs.
5006 *
5007 * Return 0 on success.
5008 */
5009 static int
5010 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5011 {
5012 struct pci_dev *pdev;
5013
5014 pdev = instance->pdev;
5015 instance->irq_context[0].instance = instance;
5016 instance->irq_context[0].MSIxIndex = 0;
5017 if (request_irq(pci_irq_vector(pdev, 0),
5018 instance->instancet->service_isr, IRQF_SHARED,
5019 "megasas", &instance->irq_context[0])) {
5020 dev_err(&instance->pdev->dev,
5021 "Failed to register IRQ from %s %d\n",
5022 __func__, __LINE__);
5023 return -1;
5024 }
5025 return 0;
5026 }
5027
5028 /**
5029 * megasas_setup_irqs_msix - register MSI-x interrupts.
5030 * @instance: Adapter soft state
5031 * @is_probe: Driver probe check
5032 *
5033 * Do not enable interrupt, only setup ISRs.
5034 *
5035 * Return 0 on success.
5036 */
5037 static int
5038 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5039 {
5040 int i, j;
5041 struct pci_dev *pdev;
5042
5043 pdev = instance->pdev;
5044
5045 /* Try MSI-x */
5046 for (i = 0; i < instance->msix_vectors; i++) {
5047 instance->irq_context[i].instance = instance;
5048 instance->irq_context[i].MSIxIndex = i;
5049 if (request_irq(pci_irq_vector(pdev, i),
5050 instance->instancet->service_isr, 0, "megasas",
5051 &instance->irq_context[i])) {
5052 dev_err(&instance->pdev->dev,
5053 "Failed to register IRQ for vector %d.\n", i);
5054 for (j = 0; j < i; j++)
5055 free_irq(pci_irq_vector(pdev, j),
5056 &instance->irq_context[j]);
5057 /* Retry irq register for IO_APIC*/
5058 instance->msix_vectors = 0;
5059 if (is_probe) {
5060 pci_free_irq_vectors(instance->pdev);
5061 return megasas_setup_irqs_ioapic(instance);
5062 } else {
5063 return -1;
5064 }
5065 }
5066 }
5067 return 0;
5068 }
5069
5070 /*
5071 * megasas_destroy_irqs- unregister interrupts.
5072 * @instance: Adapter soft state
5073 * return: void
5074 */
5075 static void
5076 megasas_destroy_irqs(struct megasas_instance *instance) {
5077
5078 int i;
5079
5080 if (instance->msix_vectors)
5081 for (i = 0; i < instance->msix_vectors; i++) {
5082 free_irq(pci_irq_vector(instance->pdev, i),
5083 &instance->irq_context[i]);
5084 }
5085 else
5086 free_irq(pci_irq_vector(instance->pdev, 0),
5087 &instance->irq_context[0]);
5088 }
5089
5090 /**
5091 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
5092 * @instance: Adapter soft state
5093 *
5094 * Sets up the JBOD map used for fast-path (FP) sequence numbers and
5095 * enables or disables use_seqnum_jbod_fp accordingly.
5096 */
5097 void
5098 megasas_setup_jbod_map(struct megasas_instance *instance)
5099 {
5100 int i;
5101 struct fusion_context *fusion = instance->ctrl_context;
5102 u32 pd_seq_map_sz;
5103
5104 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5105 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5106
5107 if (reset_devices || !fusion ||
5108 !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
5109 dev_info(&instance->pdev->dev,
5110 "Jbod map is not supported %s %d\n",
5111 __func__, __LINE__);
5112 instance->use_seqnum_jbod_fp = false;
5113 return;
5114 }
5115
5116 if (fusion->pd_seq_sync[0])
5117 goto skip_alloc;
5118
5119 for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5120 fusion->pd_seq_sync[i] = dma_alloc_coherent
5121 (&instance->pdev->dev, pd_seq_map_sz,
5122 &fusion->pd_seq_phys[i], GFP_KERNEL);
5123 if (!fusion->pd_seq_sync[i]) {
5124 dev_err(&instance->pdev->dev,
5125 "Failed to allocate memory from %s %d\n",
5126 __func__, __LINE__);
5127 if (i == 1) {
5128 dma_free_coherent(&instance->pdev->dev,
5129 pd_seq_map_sz, fusion->pd_seq_sync[0],
5130 fusion->pd_seq_phys[0]);
5131 fusion->pd_seq_sync[0] = NULL;
5132 }
5133 instance->use_seqnum_jbod_fp = false;
5134 return;
5135 }
5136 }
5137
5138 skip_alloc:
5139 if (!megasas_sync_pd_seq_num(instance, false) &&
5140 !megasas_sync_pd_seq_num(instance, true))
5141 instance->use_seqnum_jbod_fp = true;
5142 else
5143 instance->use_seqnum_jbod_fp = false;
5144 }
5145
5146 static void megasas_setup_reply_map(struct megasas_instance *instance)
5147 {
5148 const struct cpumask *mask;
5149 unsigned int queue, cpu;
5150
5151 for (queue = 0; queue < instance->msix_vectors; queue++) {
5152 mask = pci_irq_get_affinity(instance->pdev, queue);
5153 if (!mask)
5154 goto fallback;
5155
5156 for_each_cpu(cpu, mask)
5157 instance->reply_map[cpu] = queue;
5158 }
5159 return;
5160
5161 fallback:
5162 for_each_possible_cpu(cpu)
5163 instance->reply_map[cpu] = cpu % instance->msix_vectors;
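/*
 * Example with assumed numbers: 8 possible CPUs and 4 MSI-X vectors
 * map CPUs 0..7 to reply queues 0,1,2,3,0,1,2,3 (cpu % msix_vectors).
 */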
5164 }
5165
5166 /**
5167 * megasas_init_fw - Initializes the FW
5168 * @instance: Adapter soft state
5169 *
5170 * This is the main function for initializing firmware
5171 */
5172
5173 static int megasas_init_fw(struct megasas_instance *instance)
5174 {
5175 u32 max_sectors_1;
5176 u32 max_sectors_2, tmp_sectors, msix_enable;
5177 u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
5178 resource_size_t base_addr;
5179 struct megasas_register_set __iomem *reg_set;
5180 struct megasas_ctrl_info *ctrl_info = NULL;
5181 unsigned long bar_list;
5182 int i, j, loop, fw_msix_count = 0;
5183 struct IOV_111 *iovPtr;
5184 struct fusion_context *fusion;
5185
5186 fusion = instance->ctrl_context;
5187
5188 /* Find first memory bar */
5189 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5190 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5191 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5192 "megasas: LSI")) {
5193 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5194 return -EBUSY;
5195 }
5196
5197 base_addr = pci_resource_start(instance->pdev, instance->bar);
5198 instance->reg_set = ioremap_nocache(base_addr, 8192);
5199
5200 if (!instance->reg_set) {
5201 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5202 goto fail_ioremap;
5203 }
5204
5205 reg_set = instance->reg_set;
5206
5207 if (instance->adapter_type != MFI_SERIES)
5208 instance->instancet = &megasas_instance_template_fusion;
5209 else {
5210 switch (instance->pdev->device) {
5211 case PCI_DEVICE_ID_LSI_SAS1078R:
5212 case PCI_DEVICE_ID_LSI_SAS1078DE:
5213 instance->instancet = &megasas_instance_template_ppc;
5214 break;
5215 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5216 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5217 instance->instancet = &megasas_instance_template_gen2;
5218 break;
5219 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5220 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5221 instance->instancet = &megasas_instance_template_skinny;
5222 break;
5223 case PCI_DEVICE_ID_LSI_SAS1064R:
5224 case PCI_DEVICE_ID_DELL_PERC5:
5225 default:
5226 instance->instancet = &megasas_instance_template_xscale;
5227 instance->pd_list_not_supported = 1;
5228 break;
5229 }
5230 }
5231
5232 if (megasas_transition_to_ready(instance, 0)) {
5233 atomic_set(&instance->fw_reset_no_pci_access, 1);
5234 instance->instancet->adp_reset
5235 (instance, instance->reg_set);
5236 atomic_set(&instance->fw_reset_no_pci_access, 0);
5237 dev_info(&instance->pdev->dev,
5238 "FW restarted successfully from %s!\n",
5239 __func__);
5240
5241 /* wait for about 30 seconds before retrying */
5242 ssleep(30);
5243
5244 if (megasas_transition_to_ready(instance, 0))
5245 goto fail_ready_state;
5246 }
5247
5248 if (instance->adapter_type == VENTURA_SERIES) {
5249 scratch_pad_3 =
5250 readl(&instance->reg_set->outbound_scratch_pad_3);
5251 instance->max_raid_mapsize = ((scratch_pad_3 >>
5252 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5253 MR_MAX_RAID_MAP_SIZE_MASK);
5254 }
5255
5256 /* Check if MSI-X is supported while in ready state */
5257 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5258 0x4000000) >> 0x1a;
5259 if (msix_enable && !msix_disable) {
5260 int irq_flags = PCI_IRQ_MSIX;
5261
5262 scratch_pad_2 = readl
5263 (&instance->reg_set->outbound_scratch_pad_2);
5264 /* Check max MSI-X vectors */
5265 if (fusion) {
5266 if (instance->adapter_type == THUNDERBOLT_SERIES) {
5267 /* Thunderbolt Series*/
5268 instance->msix_vectors = (scratch_pad_2
5269 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5270 fw_msix_count = instance->msix_vectors;
5271 } else { /* Invader series supports more than 8 MSI-x vectors*/
5272 instance->msix_vectors = ((scratch_pad_2
5273 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5274 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5275 if (instance->msix_vectors > 16)
5276 instance->msix_combined = true;
5277
5278 if (rdpq_enable)
5279 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5280 1 : 0;
5281 fw_msix_count = instance->msix_vectors;
5282 /* Save reply post host index addresses 1-15 to local memory.
5283 * Index 0 is already saved from reg offset
5284 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
5285 */
5286 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5287 instance->reply_post_host_index_addr[loop] =
5288 (u32 __iomem *)
5289 ((u8 __iomem *)instance->reg_set +
5290 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5291 + (loop * 0x10));
5292 }
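/*
 * Register layout assumed by the loop above (illustrative only): each
 * supplemental reply post host index register sits at a 0x10 byte stride,
 * so for loop index n the saved address is
 *
 *	reg_set + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + (n * 0x10)
 *
 * e.g. index 3 resolves to MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + 0x30.
 */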
5293 }
5294 if (msix_vectors)
5295 instance->msix_vectors = min(msix_vectors,
5296 instance->msix_vectors);
5297 } else /* MFI adapters */
5298 instance->msix_vectors = 1;
5299 /* Don't bother allocating more MSI-X vectors than cpus */
5300 instance->msix_vectors = min(instance->msix_vectors,
5301 (unsigned int)num_online_cpus());
5302 if (smp_affinity_enable)
5303 irq_flags |= PCI_IRQ_AFFINITY;
5304 i = pci_alloc_irq_vectors(instance->pdev, 1,
5305 instance->msix_vectors, irq_flags);
5306 if (i > 0)
5307 instance->msix_vectors = i;
5308 else
5309 instance->msix_vectors = 0;
5310 }
5311 /*
5312 * MSI-X host index 0 is common for all adapters.
5313 * It is used for all MPT based adapters.
5314 */
5315 if (instance->msix_combined) {
5316 instance->reply_post_host_index_addr[0] =
5317 (u32 *)((u8 *)instance->reg_set +
5318 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
5319 } else {
5320 instance->reply_post_host_index_addr[0] =
5321 (u32 *)((u8 *)instance->reg_set +
5322 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5323 }
5324
5325 if (!instance->msix_vectors) {
5326 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5327 if (i < 0)
5328 goto fail_init_adapter;
5329 }
5330
5331 megasas_setup_reply_map(instance);
5332
5333 dev_info(&instance->pdev->dev,
5334 "firmware supports msix\t: (%d)", fw_msix_count);
5335 dev_info(&instance->pdev->dev,
5336 "current msix/online cpus\t: (%d/%d)\n",
5337 instance->msix_vectors, (unsigned int)num_online_cpus());
5338 dev_info(&instance->pdev->dev,
5339 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5340
5341 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5342 (unsigned long)instance);
5343
5344 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
5345 GFP_KERNEL);
5346 if (instance->ctrl_info == NULL)
5347 goto fail_init_adapter;
5348
5349 /*
5350 * Below are the default values for legacy firmware
5351 * (non-fusion based controllers).
5352 */
5353 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5354 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5355 /* Get operational params, sge flags, send init cmd to controller */
5356 if (instance->instancet->init_adapter(instance))
5357 goto fail_init_adapter;
5358
5359 if (instance->adapter_type == VENTURA_SERIES) {
5360 scratch_pad_4 =
5361 readl(&instance->reg_set->outbound_scratch_pad_4);
5362 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
5363 MR_DEFAULT_NVME_PAGE_SHIFT)
5364 instance->nvme_page_size =
5365 (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
5366
5367 dev_info(&instance->pdev->dev,
5368 "NVME page size\t: (%d)\n", instance->nvme_page_size);
5369 }
5370
5371 if (instance->msix_vectors ?
5372 megasas_setup_irqs_msix(instance, 1) :
5373 megasas_setup_irqs_ioapic(instance))
5374 goto fail_init_adapter;
5375
5376 instance->instancet->enable_intr(instance);
5377
5378 dev_info(&instance->pdev->dev, "INIT adapter done\n");
5379
5380 megasas_setup_jbod_map(instance);
5381
5382 /* For passthrough:
5383 * the following call fetches the PD list.
5384 */
5385 memset(instance->pd_list, 0,
5386 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5387 if (megasas_get_pd_list(instance) < 0) {
5388 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5389 goto fail_get_ld_pd_list;
5390 }
5391
5392 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5393
5394 /* stream detection initialization */
5395 if (instance->adapter_type == VENTURA_SERIES) {
5396 fusion->stream_detect_by_ld =
5397 kzalloc(sizeof(struct LD_STREAM_DETECT *)
5398 * MAX_LOGICAL_DRIVES_EXT,
5399 GFP_KERNEL);
5400 if (!fusion->stream_detect_by_ld) {
5401 dev_err(&instance->pdev->dev,
5402 "unable to allocate stream detection for pool of LDs\n");
5403 goto fail_get_ld_pd_list;
5404 }
5405 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5406 fusion->stream_detect_by_ld[i] =
5407 kmalloc(sizeof(struct LD_STREAM_DETECT),
5408 GFP_KERNEL);
5409 if (!fusion->stream_detect_by_ld[i]) {
5410 dev_err(&instance->pdev->dev,
5411 "unable to allocate stream detect by LD\n");
5412 for (j = 0; j < i; ++j)
5413 kfree(fusion->stream_detect_by_ld[j]);
5414 kfree(fusion->stream_detect_by_ld);
5415 fusion->stream_detect_by_ld = NULL;
5416 goto fail_get_ld_pd_list;
5417 }
5418 fusion->stream_detect_by_ld[i]->mru_bit_map
5419 = MR_STREAM_BITMAP;
5420 }
5421 }
5422
5423 if (megasas_ld_list_query(instance,
5424 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5425 goto fail_get_ld_pd_list;
5426
5427 /*
5428 * Compute the max allowed sectors per IO: The controller info has two
5429 * limits on max sectors. Driver should use the minimum of these two.
5430 *
5431 * 1 << stripe_sz_ops.min = max sectors per strip
5432 *
5433 * Note that older firmware (< FW ver 30) didn't report the information
5434 * needed to calculate max_sectors_1, so that number always ended up as zero.
5435 */
5436 tmp_sectors = 0;
5437 ctrl_info = instance->ctrl_info;
5438
5439 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5440 le16_to_cpu(ctrl_info->max_strips_per_io);
5441 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5442
5443 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
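/*
 * Worked example with assumed values (for illustration only): a controller
 * reporting stripe_sz_ops.min = 7 and max_strips_per_io = 42 yields
 * max_sectors_1 = (1 << 7) * 42 = 5376 sectors; if max_request_size is
 * 4096, the smaller limit (4096 sectors) is used as tmp_sectors.
 */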
5444
5445 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5446 instance->passive = ctrl_info->cluster.passive;
5447 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5448 instance->UnevenSpanSupport =
5449 ctrl_info->adapterOperations2.supportUnevenSpans;
5450 if (instance->UnevenSpanSupport) {
5451 struct fusion_context *fusion = instance->ctrl_context;
5452 if (MR_ValidateMapInfo(instance))
5453 fusion->fast_path_io = 1;
5454 else
5455 fusion->fast_path_io = 0;
5456
5457 }
5458 if (ctrl_info->host_interface.SRIOV) {
5459 instance->requestorId = ctrl_info->iov.requestorId;
5460 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5461 if (!ctrl_info->adapterOperations2.activePassive)
5462 instance->PlasmaFW111 = 1;
5463
5464 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5465 instance->PlasmaFW111 ? "1.11" : "new");
5466
5467 if (instance->PlasmaFW111) {
5468 iovPtr = (struct IOV_111 *)
5469 ((unsigned char *)ctrl_info + IOV_111_OFFSET);
5470 instance->requestorId = iovPtr->requestorId;
5471 }
5472 }
5473 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5474 instance->requestorId);
5475 }
5476
5477 instance->crash_dump_fw_support =
5478 ctrl_info->adapterOperations3.supportCrashDump;
5479 instance->crash_dump_drv_support =
5480 (instance->crash_dump_fw_support &&
5481 instance->crash_dump_buf);
5482 if (instance->crash_dump_drv_support)
5483 megasas_set_crash_dump_params(instance,
5484 MR_CRASH_BUF_TURN_OFF);
5485
5486 else {
5487 if (instance->crash_dump_buf)
5488 pci_free_consistent(instance->pdev,
5489 CRASH_DMA_BUF_SIZE,
5490 instance->crash_dump_buf,
5491 instance->crash_dump_h);
5492 instance->crash_dump_buf = NULL;
5493 }
5494
5495
5496 dev_info(&instance->pdev->dev,
5497 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5498 le16_to_cpu(ctrl_info->pci.vendor_id),
5499 le16_to_cpu(ctrl_info->pci.device_id),
5500 le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5501 le16_to_cpu(ctrl_info->pci.sub_device_id));
5502 dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
5503 instance->UnevenSpanSupport ? "yes" : "no");
5504 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
5505 instance->crash_dump_drv_support ? "yes" : "no");
5506 dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
5507 instance->use_seqnum_jbod_fp ? "yes" : "no");
5508
5509
5510 instance->max_sectors_per_req = instance->max_num_sge *
5511 SGE_BUFFER_SIZE / 512;
5512 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5513 instance->max_sectors_per_req = tmp_sectors;
5514
5515 /* Check for valid throttlequeuedepth module parameter */
5516 if (throttlequeuedepth &&
5517 throttlequeuedepth <= instance->max_scsi_cmds)
5518 instance->throttlequeuedepth = throttlequeuedepth;
5519 else
5520 instance->throttlequeuedepth =
5521 MEGASAS_THROTTLE_QUEUE_DEPTH;
5522
5523 if ((resetwaittime < 1) ||
5524 (resetwaittime > MEGASAS_RESET_WAIT_TIME))
5525 resetwaittime = MEGASAS_RESET_WAIT_TIME;
5526
5527 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5528 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5529
5530 /* Launch SR-IOV heartbeat timer */
5531 if (instance->requestorId) {
5532 if (!megasas_sriov_start_heartbeat(instance, 1))
5533 megasas_start_timer(instance,
5534 &instance->sriov_heartbeat_timer,
5535 megasas_sriov_heartbeat_handler,
5536 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
5537 else
5538 instance->skip_heartbeat_timer_del = 1;
5539 }
5540
5541 return 0;
5542
5543 fail_get_ld_pd_list:
5544 instance->instancet->disable_intr(instance);
5545 megasas_destroy_irqs(instance);
5546 fail_init_adapter:
5547 if (instance->msix_vectors)
5548 pci_free_irq_vectors(instance->pdev);
5549 instance->msix_vectors = 0;
5550 fail_ready_state:
5551 kfree(instance->ctrl_info);
5552 instance->ctrl_info = NULL;
5553 iounmap(instance->reg_set);
5554
5555 fail_ioremap:
5556 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5557
5558 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5559 __func__, __LINE__);
5560 return -EINVAL;
5561 }
5562
5563 /**
5564 * megasas_release_mfi - Reverses the FW initialization
5565 * @instance: Adapter soft state
5566 */
5567 static void megasas_release_mfi(struct megasas_instance *instance)
5568 {
5569 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5570
5571 if (instance->reply_queue)
5572 pci_free_consistent(instance->pdev, reply_q_sz,
5573 instance->reply_queue, instance->reply_queue_h);
5574
5575 megasas_free_cmds(instance);
5576
5577 iounmap(instance->reg_set);
5578
5579 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5580 }
5581
5582 /**
5583 * megasas_get_seq_num - Gets latest event sequence numbers
5584 * @instance: Adapter soft state
5585 * @eli: FW event log sequence numbers information
5586 *
5587 * FW maintains a log of all events in a non-volatile area. Upper layers would
5588 * usually find out the latest sequence number of the events, the seq number at
5589 * the boot etc. They would "read" all the events below the latest seq number
5590 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
5591 * number), they would subscribe to AEN (asynchronous event notification) and
5592 * wait for the events to happen.
5593 */
5594 static int
5595 megasas_get_seq_num(struct megasas_instance *instance,
5596 struct megasas_evt_log_info *eli)
5597 {
5598 struct megasas_cmd *cmd;
5599 struct megasas_dcmd_frame *dcmd;
5600 struct megasas_evt_log_info *el_info;
5601 dma_addr_t el_info_h = 0;
5602
5603 cmd = megasas_get_cmd(instance);
5604
5605 if (!cmd) {
5606 return -ENOMEM;
5607 }
5608
5609 dcmd = &cmd->frame->dcmd;
5610 el_info = pci_alloc_consistent(instance->pdev,
5611 sizeof(struct megasas_evt_log_info),
5612 &el_info_h);
5613
5614 if (!el_info) {
5615 megasas_return_cmd(instance, cmd);
5616 return -ENOMEM;
5617 }
5618
5619 memset(el_info, 0, sizeof(*el_info));
5620 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5621
5622 dcmd->cmd = MFI_CMD_DCMD;
5623 dcmd->cmd_status = 0x0;
5624 dcmd->sge_count = 1;
5625 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5626 dcmd->timeout = 0;
5627 dcmd->pad_0 = 0;
5628 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5629 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5630 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
5631 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5632
5633 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
5634 DCMD_SUCCESS) {
5635 /*
5636 * Copy the data back into callers buffer
5637 */
5638 eli->newest_seq_num = el_info->newest_seq_num;
5639 eli->oldest_seq_num = el_info->oldest_seq_num;
5640 eli->clear_seq_num = el_info->clear_seq_num;
5641 eli->shutdown_seq_num = el_info->shutdown_seq_num;
5642 eli->boot_seq_num = el_info->boot_seq_num;
5643 } else
5644 dev_err(&instance->pdev->dev, "DCMD failed "
5645 "from %s\n", __func__);
5646
5647 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5648 el_info, el_info_h);
5649
5650 megasas_return_cmd(instance, cmd);
5651
5652 return 0;
5653 }
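/*
 * Illustrative flow for the sequence-number model described above (assumed,
 * not part of the driver): a caller typically fetches the log info, reads
 * historical events up to the newest number, then registers for future ones:
 *
 *	struct megasas_evt_log_info eli;
 *	union megasas_evt_class_locale cl;	// filled in as in megasas_start_aen()
 *
 *	megasas_get_seq_num(instance, &eli);
 *	// read events up to eli.newest_seq_num via DCMD, then:
 *	megasas_register_aen(instance, le32_to_cpu(eli.newest_seq_num) + 1,
 *			     cl.word);
 */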
5654
5655 /**
5656 * megasas_register_aen - Registers for asynchronous event notification
5657 * @instance: Adapter soft state
5658 * @seq_num: The starting sequence number
5659 * @class_locale: Class of the event
5660 *
5661 * This function subscribes for AEN for events beyond the @seq_num. It requests
5662 * to be notified if and only if the event is of type @class_locale
5663 */
5664 static int
5665 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5666 u32 class_locale_word)
5667 {
5668 int ret_val;
5669 struct megasas_cmd *cmd;
5670 struct megasas_dcmd_frame *dcmd;
5671 union megasas_evt_class_locale curr_aen;
5672 union megasas_evt_class_locale prev_aen;
5673
5674 /*
5675 * If there is an AEN pending already (aen_cmd), check if the
5676 * class_locale of that pending AEN is inclusive of the new
5677 * AEN request we currently have. If it is, then we don't have
5678 * to do anything. In other words, whichever events the current
5679 * AEN request is subscribing to, have already been subscribed
5680 * to.
5681 *
5682 * If the old_cmd is _not_ inclusive, then we have to abort
5683 * that command, form a class_locale that is superset of both
5684 * old and current and re-issue to the FW
5685 */
5686
5687 curr_aen.word = class_locale_word;
5688
5689 if (instance->aen_cmd) {
5690
5691 prev_aen.word =
5692 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5693
5694 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
5695 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
5696 dev_info(&instance->pdev->dev,
5697 "%s %d out of range class %d sent by application\n",
5698 __func__, __LINE__, curr_aen.members.class);
5699 return 0;
5700 }
5701
5702 /*
5703 * A class whose enum value is smaller is inclusive of all
5704 * higher values. If a PROGRESS (= -1) was previously
5705 * registered, then new registration requests for higher
5706 * classes need not be sent to FW. They are automatically
5707 * included.
5708 *
5709 * Locale numbers don't have such hierarchy. They are bitmap
5710 * values
5711 */
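/*
 * Worked example (values assumed for illustration): if the pending AEN was
 * registered with class PROGRESS (-1) and locale 0x0001, and the new request
 * asks for class CRITICAL with locale 0x0002, the class check passes but the
 * locale bits differ, so the registration is re-issued with merged values:
 *
 *	curr_aen.members.locale |= prev_aen.members.locale;	// 0x0003
 *	curr_aen.members.class = prev_aen.members.class;	// PROGRESS
 */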
5712 if ((prev_aen.members.class <= curr_aen.members.class) &&
5713 !((prev_aen.members.locale & curr_aen.members.locale) ^
5714 curr_aen.members.locale)) {
5715 /*
5716 * Previously issued event registration includes
5717 * current request. Nothing to do.
5718 */
5719 return 0;
5720 } else {
5721 curr_aen.members.locale |= prev_aen.members.locale;
5722
5723 if (prev_aen.members.class < curr_aen.members.class)
5724 curr_aen.members.class = prev_aen.members.class;
5725
5726 instance->aen_cmd->abort_aen = 1;
5727 ret_val = megasas_issue_blocked_abort_cmd(instance,
5728 instance->
5729 aen_cmd, 30);
5730
5731 if (ret_val) {
5732 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5733 "previous AEN command\n");
5734 return ret_val;
5735 }
5736 }
5737 }
5738
5739 cmd = megasas_get_cmd(instance);
5740
5741 if (!cmd)
5742 return -ENOMEM;
5743
5744 dcmd = &cmd->frame->dcmd;
5745
5746 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5747
5748 /*
5749 * Prepare DCMD for aen registration
5750 */
5751 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5752
5753 dcmd->cmd = MFI_CMD_DCMD;
5754 dcmd->cmd_status = 0x0;
5755 dcmd->sge_count = 1;
5756 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5757 dcmd->timeout = 0;
5758 dcmd->pad_0 = 0;
5759 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5760 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
5761 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5762 instance->last_seq_num = seq_num;
5763 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5764 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
5765 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
5766
5767 if (instance->aen_cmd != NULL) {
5768 megasas_return_cmd(instance, cmd);
5769 return 0;
5770 }
5771
5772 /*
5773 * Store reference to the cmd used to register for AEN. When an
5774 * application wants us to register for AEN, we have to abort this
5775 * cmd and re-register with a new EVENT LOCALE supplied by that app
5776 */
5777 instance->aen_cmd = cmd;
5778
5779 /*
5780 * Issue the aen registration frame
5781 */
5782 instance->instancet->issue_dcmd(instance, cmd);
5783
5784 return 0;
5785 }
5786
5787 /* megasas_get_target_prop - Send DCMD with below details to firmware.
5788 *
5789 * This DCMD fetches a few properties of the LD/system PD defined
5790 * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
5791 *
5792 * The DCMD is sent by the driver whenever a new target is added to the OS.
5793 *
5794 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
5795 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
5796 * 0 = system PD, 1 = LD.
5797 * dcmd.mbox.s[1] - TargetID for LD/system PD.
5798 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
5799 *
5800 * @instance: Adapter soft state
5801 * @sdev: OS provided scsi device
5802 *
5803 * Returns 0 on success non-zero on failure.
5804 */
5805 static int
5806 megasas_get_target_prop(struct megasas_instance *instance,
5807 struct scsi_device *sdev)
5808 {
5809 int ret;
5810 struct megasas_cmd *cmd;
5811 struct megasas_dcmd_frame *dcmd;
5812 u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
5813 sdev->id;
5814
5815 cmd = megasas_get_cmd(instance);
5816
5817 if (!cmd) {
5818 dev_err(&instance->pdev->dev,
5819 "Failed to get cmd %s\n", __func__);
5820 return -ENOMEM;
5821 }
5822
5823 dcmd = &cmd->frame->dcmd;
5824
5825 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
5826 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5827 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
5828
5829 dcmd->mbox.s[1] = cpu_to_le16(targetId);
5830 dcmd->cmd = MFI_CMD_DCMD;
5831 dcmd->cmd_status = 0xFF;
5832 dcmd->sge_count = 1;
5833 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5834 dcmd->timeout = 0;
5835 dcmd->pad_0 = 0;
5836 dcmd->data_xfer_len =
5837 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5838 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
5839 dcmd->sgl.sge32[0].phys_addr =
5840 cpu_to_le32(instance->tgt_prop_h);
5841 dcmd->sgl.sge32[0].length =
5842 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5843
5844 if ((instance->adapter_type != MFI_SERIES) &&
5845 !instance->mask_interrupts)
5846 ret = megasas_issue_blocked_cmd(instance,
5847 cmd, MFI_IO_TIMEOUT_SECS);
5848 else
5849 ret = megasas_issue_polled(instance, cmd);
5850
5851 switch (ret) {
5852 case DCMD_TIMEOUT:
5853 switch (dcmd_timeout_ocr_possible(instance)) {
5854 case INITIATE_OCR:
5855 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5856 megasas_reset_fusion(instance->host,
5857 MFI_IO_TIMEOUT_OCR);
5858 break;
5859 case KILL_ADAPTER:
5860 megaraid_sas_kill_hba(instance);
5861 break;
5862 case IGNORE_TIMEOUT:
5863 dev_info(&instance->pdev->dev,
5864 "Ignore DCMD timeout: %s %d\n",
5865 __func__, __LINE__);
5866 break;
5867 }
5868 break;
5869
5870 default:
5871 megasas_return_cmd(instance, cmd);
5872 }
5873 if (ret != DCMD_SUCCESS)
5874 dev_err(&instance->pdev->dev,
5875 "return from %s %d return value %d\n",
5876 __func__, __LINE__, ret);
5877
5878 return ret;
5879 }
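/*
 * Illustrative mailbox usage for the DCMD above (assumed values): a system
 * PD on channel 1, id 5, would be described to firmware roughly as
 *
 *	dcmd->mbox.b[0] = 0;				// 0 = system PD
 *	dcmd->mbox.s[1] = cpu_to_le16((1 % 2) *
 *			MEGASAS_MAX_DEV_PER_CHANNEL + 5);	// targetId
 *	dcmd->opcode    = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
 */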
5880
5881 /**
5882 * megasas_start_aen - Subscribes to AEN during driver load time
5883 * @instance: Adapter soft state
5884 */
5885 static int megasas_start_aen(struct megasas_instance *instance)
5886 {
5887 struct megasas_evt_log_info eli;
5888 union megasas_evt_class_locale class_locale;
5889
5890 /*
5891 * Get the latest sequence number from FW
5892 */
5893 memset(&eli, 0, sizeof(eli));
5894
5895 if (megasas_get_seq_num(instance, &eli))
5896 return -1;
5897
5898 /*
5899 * Register AEN with FW for latest sequence number plus 1
5900 */
5901 class_locale.members.reserved = 0;
5902 class_locale.members.locale = MR_EVT_LOCALE_ALL;
5903 class_locale.members.class = MR_EVT_CLASS_DEBUG;
5904
5905 return megasas_register_aen(instance,
5906 le32_to_cpu(eli.newest_seq_num) + 1,
5907 class_locale.word);
5908 }
5909
5910 /**
5911 * megasas_io_attach - Attaches this driver to SCSI mid-layer
5912 * @instance: Adapter soft state
5913 */
5914 static int megasas_io_attach(struct megasas_instance *instance)
5915 {
5916 struct Scsi_Host *host = instance->host;
5917
5918 /*
5919 * Export parameters required by SCSI mid-layer
5920 */
5921 host->unique_id = instance->unique_id;
5922 host->can_queue = instance->max_scsi_cmds;
5923 host->this_id = instance->init_id;
5924 host->sg_tablesize = instance->max_num_sge;
5925
5926 if (instance->fw_support_ieee)
5927 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5928
5929 /*
5930 * Check if the module parameter value for max_sectors can be used
5931 */
5932 if (max_sectors && max_sectors < instance->max_sectors_per_req)
5933 instance->max_sectors_per_req = max_sectors;
5934 else {
5935 if (max_sectors) {
5936 if (((instance->pdev->device ==
5937 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5938 (instance->pdev->device ==
5939 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5940 (max_sectors <= MEGASAS_MAX_SECTORS)) {
5941 instance->max_sectors_per_req = max_sectors;
5942 } else {
5943 dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
5944 "and <= %d (or < 1MB for GEN2 controller)\n",
5945 instance->max_sectors_per_req);
5946 }
5947 }
5948 }
5949
5950 host->max_sectors = instance->max_sectors_per_req;
5951 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
5952 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
5953 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
5954 host->max_lun = MEGASAS_MAX_LUN;
5955 host->max_cmd_len = 16;
5956
5957 /*
5958 * Notify the mid-layer about the new controller
5959 */
5960 if (scsi_add_host(host, &instance->pdev->dev)) {
5961 dev_err(&instance->pdev->dev,
5962 "Failed to add host from %s %d\n",
5963 __func__, __LINE__);
5964 return -ENODEV;
5965 }
5966
5967 return 0;
5968 }
5969
5970 static int
5971 megasas_set_dma_mask(struct pci_dev *pdev)
5972 {
5973 /*
5974 * All our controllers are capable of performing 64-bit DMA
5975 */
5976 if (IS_DMA64) {
5977 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5978
5979 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5980 goto fail_set_dma_mask;
5981 }
5982 } else {
5983 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5984 goto fail_set_dma_mask;
5985 }
5986 /*
5987 * Ensure that all data structures are allocated in 32-bit
5988 * memory.
5989 */
5990 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5991 /* Try 32bit DMA mask and 32 bit Consistent dma mask */
5992 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
5993 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
5994 dev_info(&pdev->dev, "set 32bit DMA mask "
5995 "and 32 bit consistent mask\n");
5996 else
5997 goto fail_set_dma_mask;
5998 }
5999
6000 return 0;
6001
6002 fail_set_dma_mask:
6003 return 1;
6004 }
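/*
 * Minimal sketch of the same policy using the consolidated DMA API
 * (illustrative only; the function above uses the pci_set_*_dma_mask
 * helpers):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return 1;			// no usable streaming mask
 *	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return 1;			// coherent buffers stay 32 bit
 *	return 0;
 */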
6005
6006 /*
6007 * megasas_set_adapter_type - Set adapter type.
6008 * Supported controllers can be divided into
6009 * 4 categories- enum MR_ADAPTER_TYPE {
6010 * MFI_SERIES = 1,
6011 * THUNDERBOLT_SERIES = 2,
6012 * INVADER_SERIES = 3,
6013 * VENTURA_SERIES = 4,
6014 * };
6015 * @instance: Adapter soft state
6016 * return: void
6017 */
6018 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6019 {
6020 switch (instance->pdev->device) {
6021 case PCI_DEVICE_ID_LSI_VENTURA:
6022 case PCI_DEVICE_ID_LSI_HARPOON:
6023 case PCI_DEVICE_ID_LSI_TOMCAT:
6024 case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6025 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6026 instance->adapter_type = VENTURA_SERIES;
6027 break;
6028 case PCI_DEVICE_ID_LSI_FUSION:
6029 case PCI_DEVICE_ID_LSI_PLASMA:
6030 instance->adapter_type = THUNDERBOLT_SERIES;
6031 break;
6032 case PCI_DEVICE_ID_LSI_INVADER:
6033 case PCI_DEVICE_ID_LSI_INTRUDER:
6034 case PCI_DEVICE_ID_LSI_INTRUDER_24:
6035 case PCI_DEVICE_ID_LSI_CUTLASS_52:
6036 case PCI_DEVICE_ID_LSI_CUTLASS_53:
6037 case PCI_DEVICE_ID_LSI_FURY:
6038 instance->adapter_type = INVADER_SERIES;
6039 break;
6040 default: /* For all other supported controllers */
6041 instance->adapter_type = MFI_SERIES;
6042 break;
6043 }
6044 }
6045
6046 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6047 {
6048 instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6049 &instance->producer_h);
6050 instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6051 &instance->consumer_h);
6052
6053 if (!instance->producer || !instance->consumer) {
6054 dev_err(&instance->pdev->dev,
6055 "Failed to allocate memory for producer, consumer\n");
6056 return -1;
6057 }
6058
6059 *instance->producer = 0;
6060 *instance->consumer = 0;
6061 return 0;
6062 }
6063
6064 /**
6065 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
6066 * structures which are not common across MFI
6067 * adapters and fusion adapters.
6068 * For MFI based adapters, allocate producer and
6069 * consumer buffers. For fusion adapters, allocate
6070 * memory for fusion context.
6071 * @instance: Adapter soft state
6072 * return: 0 for SUCCESS
6073 */
6074 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6075 {
6076 instance->reply_map = kzalloc(sizeof(unsigned int) * nr_cpu_ids,
6077 GFP_KERNEL);
6078 if (!instance->reply_map)
6079 return -ENOMEM;
6080
6081 switch (instance->adapter_type) {
6082 case MFI_SERIES:
6083 if (megasas_alloc_mfi_ctrl_mem(instance))
6084 goto fail;
6085 break;
6086 case VENTURA_SERIES:
6087 case THUNDERBOLT_SERIES:
6088 case INVADER_SERIES:
6089 if (megasas_alloc_fusion_context(instance))
6090 goto fail;
6091 break;
6092 }
6093
6094 return 0;
6095 fail:
6096 kfree(instance->reply_map);
6097 instance->reply_map = NULL;
6098 return -ENOMEM;
6099 }
6100
6101 /*
6102 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
6103 * producer, consumer buffers for MFI adapters
6104 *
6105 * @instance - Adapter soft instance
6106 *
6107 */
6108 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6109 {
6110 kfree(instance->reply_map);
6111 if (instance->adapter_type == MFI_SERIES) {
6112 if (instance->producer)
6113 pci_free_consistent(instance->pdev, sizeof(u32),
6114 instance->producer,
6115 instance->producer_h);
6116 if (instance->consumer)
6117 pci_free_consistent(instance->pdev, sizeof(u32),
6118 instance->consumer,
6119 instance->consumer_h);
6120 } else {
6121 megasas_free_fusion_context(instance);
6122 }
6123 }
6124
6125 /**
6126 * megasas_probe_one - PCI hotplug entry point
6127 * @pdev: PCI device structure
6128 * @id: PCI ids of supported hotplugged adapter
6129 */
6130 static int megasas_probe_one(struct pci_dev *pdev,
6131 const struct pci_device_id *id)
6132 {
6133 int rval, pos;
6134 struct Scsi_Host *host;
6135 struct megasas_instance *instance;
6136 u16 control = 0;
6137
6138 /* Reset MSI-X in the kdump kernel */
6139 if (reset_devices) {
6140 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
6141 if (pos) {
6142 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
6143 &control);
6144 if (control & PCI_MSIX_FLAGS_ENABLE) {
6145 dev_info(&pdev->dev, "resetting MSI-X\n");
6146 pci_write_config_word(pdev,
6147 pos + PCI_MSIX_FLAGS,
6148 control &
6149 ~PCI_MSIX_FLAGS_ENABLE);
6150 }
6151 }
6152 }
6153
6154 /*
6155 * PCI prepping: enable device, set bus mastering and dma mask
6156 */
6157 rval = pci_enable_device_mem(pdev);
6158
6159 if (rval) {
6160 return rval;
6161 }
6162
6163 pci_set_master(pdev);
6164
6165 if (megasas_set_dma_mask(pdev))
6166 goto fail_set_dma_mask;
6167
6168 host = scsi_host_alloc(&megasas_template,
6169 sizeof(struct megasas_instance));
6170
6171 if (!host) {
6172 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
6173 goto fail_alloc_instance;
6174 }
6175
6176 instance = (struct megasas_instance *)host->hostdata;
6177 memset(instance, 0, sizeof(*instance));
6178 atomic_set(&instance->fw_reset_no_pci_access, 0);
6179 instance->pdev = pdev;
6180
6181 megasas_set_adapter_type(instance);
6182
6183 if (megasas_alloc_ctrl_mem(instance))
6184 goto fail_alloc_dma_buf;
6185
6186 /* Crash dump feature related initialisation*/
6187 instance->drv_buf_index = 0;
6188 instance->drv_buf_alloc = 0;
6189 instance->crash_dump_fw_support = 0;
6190 instance->crash_dump_app_support = 0;
6191 instance->fw_crash_state = UNAVAILABLE;
6192 spin_lock_init(&instance->crashdump_lock);
6193 instance->crash_dump_buf = NULL;
6194
6195 megasas_poll_wait_aen = 0;
6196 instance->flag_ieee = 0;
6197 instance->ev = NULL;
6198 instance->issuepend_done = 1;
6199 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
6200 instance->is_imr = 0;
6201
6202 instance->evt_detail = pci_alloc_consistent(pdev,
6203 sizeof(struct
6204 megasas_evt_detail),
6205 &instance->evt_detail_h);
6206
6207 if (!instance->evt_detail) {
6208 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
6209 "event detail structure\n");
6210 goto fail_alloc_dma_buf;
6211 }
6212
6213 if (!reset_devices) {
6214 instance->system_info_buf = pci_zalloc_consistent(pdev,
6215 sizeof(struct MR_DRV_SYSTEM_INFO),
6216 &instance->system_info_h);
6217 if (!instance->system_info_buf)
6218 dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
6219
6220 instance->pd_info = pci_alloc_consistent(pdev,
6221 sizeof(struct MR_PD_INFO), &instance->pd_info_h);
6222
6223 if (!instance->pd_info)
6224 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
6225
6226 instance->tgt_prop = pci_alloc_consistent(pdev,
6227 sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
6228
6229 if (!instance->tgt_prop)
6230 dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
6231
6232 instance->crash_dump_buf = pci_alloc_consistent(pdev,
6233 CRASH_DMA_BUF_SIZE,
6234 &instance->crash_dump_h);
6235 if (!instance->crash_dump_buf)
6236 dev_err(&pdev->dev, "Can't allocate Firmware "
6237 "crash dump DMA buffer\n");
6238 }
6239
6240 /*
6241 * Initialize locks and queues
6242 */
6243 INIT_LIST_HEAD(&instance->cmd_pool);
6244 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
6245
6246 atomic_set(&instance->fw_outstanding,0);
6247
6248 init_waitqueue_head(&instance->int_cmd_wait_q);
6249 init_waitqueue_head(&instance->abort_cmd_wait_q);
6250
6251 spin_lock_init(&instance->mfi_pool_lock);
6252 spin_lock_init(&instance->hba_lock);
6253 spin_lock_init(&instance->stream_lock);
6254 spin_lock_init(&instance->completion_lock);
6255
6256 mutex_init(&instance->reset_mutex);
6257 mutex_init(&instance->hba_mutex);
6258
6259 /*
6260 * Initialize PCI related and misc parameters
6261 */
6262 instance->host = host;
6263 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
6264 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6265 instance->ctrl_info = NULL;
6266
6267
6268 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
6269 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
6270 instance->flag_ieee = 1;
6271
6272 megasas_dbg_lvl = 0;
6273 instance->flag = 0;
6274 instance->unload = 1;
6275 instance->last_time = 0;
6276 instance->disableOnlineCtrlReset = 1;
6277 instance->UnevenSpanSupport = 0;
6278
6279 if (instance->adapter_type != MFI_SERIES) {
6280 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6281 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
6282 } else
6283 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6284
6285 /*
6286 * Initialize MFI Firmware
6287 */
6288 if (megasas_init_fw(instance))
6289 goto fail_init_mfi;
6290
6291 if (instance->requestorId) {
6292 if (instance->PlasmaFW111) {
6293 instance->vf_affiliation_111 =
6294 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
6295 &instance->vf_affiliation_111_h);
6296 if (!instance->vf_affiliation_111)
6297 dev_warn(&pdev->dev, "Can't allocate "
6298 "memory for VF affiliation buffer\n");
6299 } else {
6300 instance->vf_affiliation =
6301 pci_alloc_consistent(pdev,
6302 (MAX_LOGICAL_DRIVES + 1) *
6303 sizeof(struct MR_LD_VF_AFFILIATION),
6304 &instance->vf_affiliation_h);
6305 if (!instance->vf_affiliation)
6306 dev_warn(&pdev->dev, "Can't allocate "
6307 "memory for VF affiliation buffer\n");
6308 }
6309 }
6310
6311 /*
6312 * Store instance in PCI softstate
6313 */
6314 pci_set_drvdata(pdev, instance);
6315
6316 /*
6317 * Add this controller to megasas_mgmt_info structure so that it
6318 * can be exported to management applications
6319 */
6320 megasas_mgmt_info.count++;
6321 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
6322 megasas_mgmt_info.max_index++;
6323
6324 /*
6325 * Register with SCSI mid-layer
6326 */
6327 if (megasas_io_attach(instance))
6328 goto fail_io_attach;
6329
6330 instance->unload = 0;
6331 /*
6332 * Trigger SCSI to scan our drives
6333 */
6334 scsi_scan_host(host);
6335
6336 /*
6337 * Initiate AEN (Asynchronous Event Notification)
6338 */
6339 if (megasas_start_aen(instance)) {
6340 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
6341 goto fail_start_aen;
6342 }
6343
6344 /* Get current SR-IOV LD/VF affiliation */
6345 if (instance->requestorId)
6346 megasas_get_ld_vf_affiliation(instance, 1);
6347
6348 return 0;
6349
6350 fail_start_aen:
6351 fail_io_attach:
6352 megasas_mgmt_info.count--;
6353 megasas_mgmt_info.max_index--;
6354 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6355
6356 instance->instancet->disable_intr(instance);
6357 megasas_destroy_irqs(instance);
6358
6359 if (instance->adapter_type != MFI_SERIES)
6360 megasas_release_fusion(instance);
6361 else
6362 megasas_release_mfi(instance);
6363 if (instance->msix_vectors)
6364 pci_free_irq_vectors(instance->pdev);
6365 fail_init_mfi:
6366 fail_alloc_dma_buf:
6367 if (instance->evt_detail)
6368 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6369 instance->evt_detail,
6370 instance->evt_detail_h);
6371
6372 if (instance->pd_info)
6373 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6374 instance->pd_info,
6375 instance->pd_info_h);
6376 if (instance->tgt_prop)
6377 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6378 instance->tgt_prop,
6379 instance->tgt_prop_h);
6380 megasas_free_ctrl_mem(instance);
6381 scsi_host_put(host);
6382 fail_alloc_instance:
6383 fail_set_dma_mask:
6384 pci_disable_device(pdev);
6385
6386 return -ENODEV;
6387 }
6388
6389 /**
6390 * megasas_flush_cache - Requests FW to flush all its caches
6391 * @instance: Adapter soft state
6392 */
6393 static void megasas_flush_cache(struct megasas_instance *instance)
6394 {
6395 struct megasas_cmd *cmd;
6396 struct megasas_dcmd_frame *dcmd;
6397
6398 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6399 return;
6400
6401 cmd = megasas_get_cmd(instance);
6402
6403 if (!cmd)
6404 return;
6405
6406 dcmd = &cmd->frame->dcmd;
6407
6408 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6409
6410 dcmd->cmd = MFI_CMD_DCMD;
6411 dcmd->cmd_status = 0x0;
6412 dcmd->sge_count = 0;
6413 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6414 dcmd->timeout = 0;
6415 dcmd->pad_0 = 0;
6416 dcmd->data_xfer_len = 0;
6417 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
6418 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
6419
6420 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6421 != DCMD_SUCCESS) {
6422 dev_err(&instance->pdev->dev,
6423 "return from %s %d\n", __func__, __LINE__);
6424 return;
6425 }
6426
6427 megasas_return_cmd(instance, cmd);
6428 }
6429
6430 /**
6431 * megasas_shutdown_controller - Instructs FW to shutdown the controller
6432 * @instance: Adapter soft state
6433 * @opcode: Shutdown/Hibernate
6434 */
6435 static void megasas_shutdown_controller(struct megasas_instance *instance,
6436 u32 opcode)
6437 {
6438 struct megasas_cmd *cmd;
6439 struct megasas_dcmd_frame *dcmd;
6440
6441 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6442 return;
6443
6444 cmd = megasas_get_cmd(instance);
6445
6446 if (!cmd)
6447 return;
6448
6449 if (instance->aen_cmd)
6450 megasas_issue_blocked_abort_cmd(instance,
6451 instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6452 if (instance->map_update_cmd)
6453 megasas_issue_blocked_abort_cmd(instance,
6454 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6455 if (instance->jbod_seq_cmd)
6456 megasas_issue_blocked_abort_cmd(instance,
6457 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6458
6459 dcmd = &cmd->frame->dcmd;
6460
6461 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6462
6463 dcmd->cmd = MFI_CMD_DCMD;
6464 dcmd->cmd_status = 0x0;
6465 dcmd->sge_count = 0;
6466 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6467 dcmd->timeout = 0;
6468 dcmd->pad_0 = 0;
6469 dcmd->data_xfer_len = 0;
6470 dcmd->opcode = cpu_to_le32(opcode);
6471
6472 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6473 != DCMD_SUCCESS) {
6474 dev_err(&instance->pdev->dev,
6475 "return from %s %d\n", __func__, __LINE__);
6476 return;
6477 }
6478
6479 megasas_return_cmd(instance, cmd);
6480 }
6481
6482 #ifdef CONFIG_PM
6483 /**
6484 * megasas_suspend - driver suspend entry point
6485 * @pdev: PCI device structure
6486 * @state: PCI power state to suspend routine
6487 */
6488 static int
6489 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6490 {
6491 struct Scsi_Host *host;
6492 struct megasas_instance *instance;
6493
6494 instance = pci_get_drvdata(pdev);
6495 host = instance->host;
6496 instance->unload = 1;
6497
6498 /* Shutdown SR-IOV heartbeat timer */
6499 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6500 del_timer_sync(&instance->sriov_heartbeat_timer);
6501
6502 megasas_flush_cache(instance);
6503 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6504
6505 /* cancel the delayed work if it is still queued */
6506 if (instance->ev != NULL) {
6507 struct megasas_aen_event *ev = instance->ev;
6508 cancel_delayed_work_sync(&ev->hotplug_work);
6509 instance->ev = NULL;
6510 }
6511
6512 tasklet_kill(&instance->isr_tasklet);
6513
6514 pci_set_drvdata(instance->pdev, instance);
6515 instance->instancet->disable_intr(instance);
6516
6517 megasas_destroy_irqs(instance);
6518
6519 if (instance->msix_vectors)
6520 pci_free_irq_vectors(instance->pdev);
6521
6522 pci_save_state(pdev);
6523 pci_disable_device(pdev);
6524
6525 pci_set_power_state(pdev, pci_choose_state(pdev, state));
6526
6527 return 0;
6528 }
6529
6530 /**
6531 * megasas_resume- driver resume entry point
6532 * @pdev: PCI device structure
6533 */
6534 static int
6535 megasas_resume(struct pci_dev *pdev)
6536 {
6537 int rval;
6538 struct Scsi_Host *host;
6539 struct megasas_instance *instance;
6540 int irq_flags = PCI_IRQ_LEGACY;
6541
6542 instance = pci_get_drvdata(pdev);
6543 host = instance->host;
6544 pci_set_power_state(pdev, PCI_D0);
6545 pci_enable_wake(pdev, PCI_D0, 0);
6546 pci_restore_state(pdev);
6547
6548 /*
6549 * PCI prepping: enable device, set bus mastering and dma mask
6550 */
6551 rval = pci_enable_device_mem(pdev);
6552
6553 if (rval) {
6554 dev_err(&pdev->dev, "Enable device failed\n");
6555 return rval;
6556 }
6557
6558 pci_set_master(pdev);
6559
6560 if (megasas_set_dma_mask(pdev))
6561 goto fail_set_dma_mask;
6562
6563 /*
6564 * Initialize MFI Firmware
6565 */
6566
6567 atomic_set(&instance->fw_outstanding, 0);
6568
6569 /*
6570 * We expect the FW state to be READY
6571 */
6572 if (megasas_transition_to_ready(instance, 0))
6573 goto fail_ready_state;
6574
6575 /* Now re-enable MSI-X */
6576 if (instance->msix_vectors) {
6577 irq_flags = PCI_IRQ_MSIX;
6578 if (smp_affinity_enable)
6579 irq_flags |= PCI_IRQ_AFFINITY;
6580 }
6581 rval = pci_alloc_irq_vectors(instance->pdev, 1,
6582 instance->msix_vectors ?
6583 instance->msix_vectors : 1, irq_flags);
6584 if (rval < 0)
6585 goto fail_reenable_msix;
6586
6587 megasas_setup_reply_map(instance);
6588
6589 if (instance->adapter_type != MFI_SERIES) {
6590 megasas_reset_reply_desc(instance);
6591 if (megasas_ioc_init_fusion(instance)) {
6592 megasas_free_cmds(instance);
6593 megasas_free_cmds_fusion(instance);
6594 goto fail_init_mfi;
6595 }
6596 if (!megasas_get_map_info(instance))
6597 megasas_sync_map_info(instance);
6598 } else {
6599 *instance->producer = 0;
6600 *instance->consumer = 0;
6601 if (megasas_issue_init_mfi(instance))
6602 goto fail_init_mfi;
6603 }
6604
6605 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
6606 goto fail_init_mfi;
6607
6608 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6609 (unsigned long)instance);
6610
6611 if (instance->msix_vectors ?
6612 megasas_setup_irqs_msix(instance, 0) :
6613 megasas_setup_irqs_ioapic(instance))
6614 goto fail_init_mfi;
6615
6616 /* Re-launch SR-IOV heartbeat timer */
6617 if (instance->requestorId) {
6618 if (!megasas_sriov_start_heartbeat(instance, 0))
6619 megasas_start_timer(instance,
6620 &instance->sriov_heartbeat_timer,
6621 megasas_sriov_heartbeat_handler,
6622 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
6623 else {
6624 instance->skip_heartbeat_timer_del = 1;
6625 goto fail_init_mfi;
6626 }
6627 }
6628
6629 instance->instancet->enable_intr(instance);
6630 megasas_setup_jbod_map(instance);
6631 instance->unload = 0;
6632
6633 /*
6634 * Initiate AEN (Asynchronous Event Notification)
6635 */
6636 if (megasas_start_aen(instance))
6637 dev_err(&instance->pdev->dev, "Start AEN failed\n");
6638
6639 return 0;
6640
6641 fail_init_mfi:
6642 if (instance->evt_detail)
6643 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6644 instance->evt_detail,
6645 instance->evt_detail_h);
6646
6647 if (instance->pd_info)
6648 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6649 instance->pd_info,
6650 instance->pd_info_h);
6651 if (instance->tgt_prop)
6652 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6653 instance->tgt_prop,
6654 instance->tgt_prop_h);
6655
6656 megasas_free_ctrl_mem(instance);
6657 scsi_host_put(host);
6658
6659 fail_set_dma_mask:
6660 fail_ready_state:
6661 fail_reenable_msix:
6662
6663 pci_disable_device(pdev);
6664
6665 return -ENODEV;
6666 }
6667 #else
6668 #define megasas_suspend NULL
6669 #define megasas_resume NULL
6670 #endif
6671
6672 static inline int
6673 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
6674 {
6675 int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
6676 int i;
6677
6678 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6679 return 1;
6680
6681 for (i = 0; i < wait_time; i++) {
6682 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
6683 break;
6684
6685 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
6686 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
6687
6688 msleep(1000);
6689 }
6690
6691 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6692 dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
6693 __func__);
6694 return 1;
6695 }
6696
6697 return 0;
6698 }
6699
6700 /**
6701 * megasas_detach_one - PCI hot"un"plug entry point
6702 * @pdev: PCI device structure
6703 */
6704 static void megasas_detach_one(struct pci_dev *pdev)
6705 {
6706 int i;
6707 struct Scsi_Host *host;
6708 struct megasas_instance *instance;
6709 struct fusion_context *fusion;
6710 u32 pd_seq_map_sz;
6711
6712 instance = pci_get_drvdata(pdev);
6713 host = instance->host;
6714 fusion = instance->ctrl_context;
6715
6716 /* Shutdown SR-IOV heartbeat timer */
6717 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6718 del_timer_sync(&instance->sriov_heartbeat_timer);
6719
6720 if (instance->fw_crash_state != UNAVAILABLE)
6721 megasas_free_host_crash_buffer(instance);
6722 scsi_remove_host(instance->host);
6723 instance->unload = 1;
6724
6725 if (megasas_wait_for_adapter_operational(instance))
6726 goto skip_firing_dcmds;
6727
6728 megasas_flush_cache(instance);
6729 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6730
6731 skip_firing_dcmds:
6732 /* cancel the delayed work if it is still queued */
6733 if (instance->ev != NULL) {
6734 struct megasas_aen_event *ev = instance->ev;
6735 cancel_delayed_work_sync(&ev->hotplug_work);
6736 instance->ev = NULL;
6737 }
6738
6739 /* cancel all wait events */
6740 wake_up_all(&instance->int_cmd_wait_q);
6741
6742 tasklet_kill(&instance->isr_tasklet);
6743
6744 /*
6745 * Take the instance off the instance array. Note that we will not
6746 * decrement the max_index. We let this array be a sparse array.
6747 */
6748 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6749 if (megasas_mgmt_info.instance[i] == instance) {
6750 megasas_mgmt_info.count--;
6751 megasas_mgmt_info.instance[i] = NULL;
6752
6753 break;
6754 }
6755 }
6756
6757 instance->instancet->disable_intr(instance);
6758
6759 megasas_destroy_irqs(instance);
6760
6761 if (instance->msix_vectors)
6762 pci_free_irq_vectors(instance->pdev);
6763
6764 if (instance->adapter_type == VENTURA_SERIES) {
6765 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6766 kfree(fusion->stream_detect_by_ld[i]);
6767 kfree(fusion->stream_detect_by_ld);
6768 fusion->stream_detect_by_ld = NULL;
6769 }
6770
6771
6772 if (instance->adapter_type != MFI_SERIES) {
6773 megasas_release_fusion(instance);
6774 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
6775 (sizeof(struct MR_PD_CFG_SEQ) *
6776 (MAX_PHYSICAL_DEVICES - 1));
6777 for (i = 0; i < 2 ; i++) {
6778 if (fusion->ld_map[i])
6779 dma_free_coherent(&instance->pdev->dev,
6780 fusion->max_map_sz,
6781 fusion->ld_map[i],
6782 fusion->ld_map_phys[i]);
6783 if (fusion->ld_drv_map[i]) {
6784 if (is_vmalloc_addr(fusion->ld_drv_map[i]))
6785 vfree(fusion->ld_drv_map[i]);
6786 else
6787 free_pages((ulong)fusion->ld_drv_map[i],
6788 fusion->drv_map_pages);
6789 }
6790
6791 if (fusion->pd_seq_sync[i])
6792 dma_free_coherent(&instance->pdev->dev,
6793 pd_seq_map_sz,
6794 fusion->pd_seq_sync[i],
6795 fusion->pd_seq_phys[i]);
6796 }
6797 } else {
6798 megasas_release_mfi(instance);
6799 }
6800
6801 kfree(instance->ctrl_info);
6802
6803 if (instance->evt_detail)
6804 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6805 instance->evt_detail, instance->evt_detail_h);
6806 if (instance->pd_info)
6807 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6808 instance->pd_info,
6809 instance->pd_info_h);
6810 if (instance->tgt_prop)
6811 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6812 instance->tgt_prop,
6813 instance->tgt_prop_h);
6814 if (instance->vf_affiliation)
6815 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6816 sizeof(struct MR_LD_VF_AFFILIATION),
6817 instance->vf_affiliation,
6818 instance->vf_affiliation_h);
6819
6820 if (instance->vf_affiliation_111)
6821 pci_free_consistent(pdev,
6822 sizeof(struct MR_LD_VF_AFFILIATION_111),
6823 instance->vf_affiliation_111,
6824 instance->vf_affiliation_111_h);
6825
6826 if (instance->hb_host_mem)
6827 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
6828 instance->hb_host_mem,
6829 instance->hb_host_mem_h);
6830
6831 if (instance->crash_dump_buf)
6832 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6833 instance->crash_dump_buf, instance->crash_dump_h);
6834
6835 if (instance->system_info_buf)
6836 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6837 instance->system_info_buf, instance->system_info_h);
6838
6839 megasas_free_ctrl_mem(instance);
6840
6841 scsi_host_put(host);
6842
6843 pci_disable_device(pdev);
6844 }
6845
6846 /**
6847 * megasas_shutdown - Shutdown entry point
6848 * @pdev: PCI device structure
6849 */
6850 static void megasas_shutdown(struct pci_dev *pdev)
6851 {
6852 struct megasas_instance *instance = pci_get_drvdata(pdev);
6853
6854 instance->unload = 1;
6855
6856 if (megasas_wait_for_adapter_operational(instance))
6857 goto skip_firing_dcmds;
6858
6859 megasas_flush_cache(instance);
6860 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6861
6862 skip_firing_dcmds:
6863 instance->instancet->disable_intr(instance);
6864 megasas_destroy_irqs(instance);
6865
6866 if (instance->msix_vectors)
6867 pci_free_irq_vectors(instance->pdev);
6868 }
6869
6870 /**
6871 * megasas_mgmt_open - char node "open" entry point
6872 */
6873 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
6874 {
6875 /*
6876 * Allow only those users with admin rights
6877 */
6878 if (!capable(CAP_SYS_ADMIN))
6879 return -EACCES;
6880
6881 return 0;
6882 }
6883
6884 /**
6885 * megasas_mgmt_fasync - Async notifier registration from applications
6886 *
6887 * This function adds the calling process to a driver global queue. When an
6888 * event occurs, SIGIO will be sent to all processes in this queue.
6889 */
6890 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
6891 {
6892 int rc;
6893
6894 mutex_lock(&megasas_async_queue_mutex);
6895
6896 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
6897
6898 mutex_unlock(&megasas_async_queue_mutex);
6899
6900 if (rc >= 0) {
6901 /* For sanity check when we get ioctl */
6902 filep->private_data = filep;
6903 return 0;
6904 }
6905
6906 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
6907
6908 return rc;
6909 }
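/*
 * Illustrative userspace usage (assumed, not part of the driver): an
 * application arms SIGIO delivery on the management node roughly like
 *
 *	int fd = open("/dev/megaraid_sas_ioctl_node", O_RDONLY);
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *	// SIGIO is then raised whenever the driver posts an AEN
 *
 * The device node name is an assumption here; it depends on how the
 * management character device is registered.
 */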
6910
6911 /**
6912 * megasas_mgmt_poll - char node "poll" entry point
6913 */
6914 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
6915 {
6916 unsigned int mask;
6917 unsigned long flags;
6918
6919 poll_wait(file, &megasas_poll_wait, wait);
6920 spin_lock_irqsave(&poll_aen_lock, flags);
6921 if (megasas_poll_wait_aen)
6922 mask = (POLLIN | POLLRDNORM);
6923 else
6924 mask = 0;
6925 megasas_poll_wait_aen = 0;
6926 spin_unlock_irqrestore(&poll_aen_lock, flags);
6927 return mask;
6928 }
6929
6930 /*
6931 * megasas_set_crash_dump_params_ioctl:
6932 * Send CRASH_DUMP_MODE DCMD to all controllers
6933 * @cmd: MFI command frame
6934 */
6935
6936 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
6937 {
6938 struct megasas_instance *local_instance;
6939 int i, error = 0;
6940 int crash_support;
6941
6942 crash_support = cmd->frame->dcmd.mbox.w[0];
6943
6944 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6945 local_instance = megasas_mgmt_info.instance[i];
6946 if (local_instance && local_instance->crash_dump_drv_support) {
6947 if ((atomic_read(&local_instance->adprecovery) ==
6948 MEGASAS_HBA_OPERATIONAL) &&
6949 !megasas_set_crash_dump_params(local_instance,
6950 crash_support)) {
6951 local_instance->crash_dump_app_support =
6952 crash_support;
6953 dev_info(&local_instance->pdev->dev,
6954 "Application firmware crash "
6955 "dump mode set success\n");
6956 error = 0;
6957 } else {
6958 dev_info(&local_instance->pdev->dev,
6959 "Application firmware crash "
6960 "dump mode set failed\n");
6961 error = -1;
6962 }
6963 }
6964 }
6965 return error;
6966 }
6967
6968 /**
6969 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
6970 * @instance: Adapter soft state
6971 * @argp: User's ioctl packet
6972 */
6973 static int
6974 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6975 struct megasas_iocpacket __user * user_ioc,
6976 struct megasas_iocpacket *ioc)
6977 {
6978 struct megasas_sge32 *kern_sge32;
6979 struct megasas_cmd *cmd;
6980 void *kbuff_arr[MAX_IOCTL_SGE];
6981 dma_addr_t buf_handle = 0;
6982 int error = 0, i;
6983 void *sense = NULL;
6984 dma_addr_t sense_handle;
6985 unsigned long *sense_ptr;
6986 u32 opcode;
6987
6988 memset(kbuff_arr, 0, sizeof(kbuff_arr));
6989
6990 if (ioc->sge_count > MAX_IOCTL_SGE) {
6991 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
6992 ioc->sge_count, MAX_IOCTL_SGE);
6993 return -EINVAL;
6994 }
6995
6996 cmd = megasas_get_cmd(instance);
6997 if (!cmd) {
6998 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
6999 return -ENOMEM;
7000 }
7001
7002 /*
7003 * User's IOCTL packet has 2 frames (maximum). Copy those two
7004 * frames into our cmd's frames. cmd->frame's context will get
7005 * overwritten when we copy from user's frames. So set that value
7006 * alone separately
7007 */
7008 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
7009 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
7010 cmd->frame->hdr.pad_0 = 0;
7011 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
7012 MFI_FRAME_SGL64 |
7013 MFI_FRAME_SENSE64));
7014 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
7015
7016 if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
7017 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
7018 megasas_return_cmd(instance, cmd);
7019 return -1;
7020 }
7021 }
7022
7023 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
7024 error = megasas_set_crash_dump_params_ioctl(cmd);
7025 megasas_return_cmd(instance, cmd);
7026 return error;
7027 }
7028
7029 /*
7030 * The management interface between applications and the fw uses
7031 * MFI frames. E.g., RAID configuration changes, LD property changes,
7032 * etc. are accomplished through different kinds of MFI frames. The
7033 * driver needs to care only about substituting user buffers with
7034 * kernel buffers in SGLs. The location of SGL is embedded in the
7035 * struct iocpacket itself.
7036 */
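/*
 * Illustrative SGL substitution (assumed values): if the application built
 * an iocpacket with sgl_off = 40 and a single 512 byte buffer, the loop
 * below finds the SGL inside the copied frame and rewrites it to point at
 * a kernel bounce buffer:
 *
 *	kern_sge32 = (struct megasas_sge32 *)((u8 *)cmd->frame + 40);
 *	kern_sge32[0].phys_addr = cpu_to_le32(buf_handle);	// DMA address
 *	kern_sge32[0].length    = cpu_to_le32(512);
 */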
7037 kern_sge32 = (struct megasas_sge32 *)
7038 ((unsigned long)cmd->frame + ioc->sgl_off);
7039
7040 /*
7041 * For each user buffer, create a mirror buffer and copy in
7042 */
7043 for (i = 0; i < ioc->sge_count; i++) {
7044 if (!ioc->sgl[i].iov_len)
7045 continue;
7046
7047 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
7048 ioc->sgl[i].iov_len,
7049 &buf_handle, GFP_KERNEL);
7050 if (!kbuff_arr[i]) {
7051 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
7052 "kernel SGL buffer for IOCTL\n");
7053 error = -ENOMEM;
7054 goto out;
7055 }
7056
7057 /*
7058 * We don't change the dma_coherent_mask, so
7059 * dma_alloc_coherent only returns 32 bit addresses
7060 */
7061 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
7062 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7063
7064 /*
7065 * We created a kernel buffer corresponding to the
7066 * user buffer. Now copy in from the user buffer
7067 */
7068 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
7069 (u32) (ioc->sgl[i].iov_len))) {
7070 error = -EFAULT;
7071 goto out;
7072 }
7073 }
7074
7075 if (ioc->sense_len) {
7076 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
7077 &sense_handle, GFP_KERNEL);
7078 if (!sense) {
7079 error = -ENOMEM;
7080 goto out;
7081 }
7082
7083 sense_ptr =
7084 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
7085 *sense_ptr = cpu_to_le32(sense_handle);
7086 }
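
	/*
	 * The frame now carries the DMA address of the kernel sense buffer
	 * at sense_off, so the firmware writes sense data there. The
	 * original user-space sense pointer is still present at the same
	 * offset in ioc->frame.raw and is used for the copy-out further
	 * below.
	 */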
7087
7088 /*
7089 * Set the sync_cmd flag so that the ISR knows not to complete this
7090 * cmd to the SCSI mid-layer
7091 */
7092 cmd->sync_cmd = 1;
7093 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
7094 cmd->sync_cmd = 0;
7095 dev_err(&instance->pdev->dev,
7096 "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
7097 __func__, __LINE__, opcode, cmd->cmd_status_drv);
7098 return -EBUSY;
7099 }
7100
7101 cmd->sync_cmd = 0;
7102
7103 if (instance->unload == 1) {
7104 dev_info(&instance->pdev->dev, "Driver unload is in progress, "
7105 "don't submit data to application\n");
7106 goto out;
7107 }
7108 /*
7109 * copy out the kernel buffers to user buffers
7110 */
7111 for (i = 0; i < ioc->sge_count; i++) {
7112 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
7113 ioc->sgl[i].iov_len)) {
7114 error = -EFAULT;
7115 goto out;
7116 }
7117 }
7118
7119 /*
7120 * copy out the sense
7121 */
7122 if (ioc->sense_len) {
7123 /*
7124 * sense_ptr points to the location that has the user
7125 * sense buffer address
7126 */
7127 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
7128 ioc->sense_off);
7129
7130 if (copy_to_user((void __user *)((unsigned long)
7131 get_unaligned((unsigned long *)sense_ptr)),
7132 sense, ioc->sense_len)) {
7133 dev_err(&instance->pdev->dev, "Failed to copy out "
7134 "sense data to user\n");
7135 error = -EFAULT;
7136 goto out;
7137 }
7138 }
7139
7140 /*
7141 * copy the status codes returned by the fw
7142 */
7143 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
7144 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
7145 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
7146 error = -EFAULT;
7147 }
7148
7149 out:
7150 if (sense) {
7151 dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
7152 sense, sense_handle);
7153 }
7154
7155 for (i = 0; i < ioc->sge_count; i++) {
7156 if (kbuff_arr[i]) {
7157 dma_free_coherent(&instance->pdev->dev,
7158 le32_to_cpu(kern_sge32[i].length),
7159 kbuff_arr[i],
7160 le32_to_cpu(kern_sge32[i].phys_addr));
7161 kbuff_arr[i] = NULL;
7162 }
7163 }
7164
7165 megasas_return_cmd(instance, cmd);
7166 return error;
7167 }
7168
7169 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7170 {
7171 struct megasas_iocpacket __user *user_ioc =
7172 (struct megasas_iocpacket __user *)arg;
7173 struct megasas_iocpacket *ioc;
7174 struct megasas_instance *instance;
7175 int error;
7176 int i;
7177 unsigned long flags;
7178 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7179
7180 ioc = memdup_user(user_ioc, sizeof(*ioc));
7181 if (IS_ERR(ioc))
7182 return PTR_ERR(ioc);
7183
7184 instance = megasas_lookup_instance(ioc->host_no);
7185 if (!instance) {
7186 error = -ENODEV;
7187 goto out_kfree_ioc;
7188 }
7189
7190 /* Adjust ioctl wait time for VF mode */
7191 if (instance->requestorId)
7192 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7193
7194 /* Block ioctls in VF mode */
7195 if (instance->requestorId && !allow_vf_ioctls) {
7196 error = -ENODEV;
7197 goto out_kfree_ioc;
7198 }
7199
7200 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7201 dev_err(&instance->pdev->dev, "Controller in crit error\n");
7202 error = -ENODEV;
7203 goto out_kfree_ioc;
7204 }
7205
7206 if (instance->unload == 1) {
7207 error = -ENODEV;
7208 goto out_kfree_ioc;
7209 }
7210
7211 if (down_interruptible(&instance->ioctl_sem)) {
7212 error = -ERESTARTSYS;
7213 goto out_kfree_ioc;
7214 }
7215
7216 for (i = 0; i < wait_time; i++) {
7217
7218 spin_lock_irqsave(&instance->hba_lock, flags);
7219 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7220 spin_unlock_irqrestore(&instance->hba_lock, flags);
7221 break;
7222 }
7223 spin_unlock_irqrestore(&instance->hba_lock, flags);
7224
7225 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
7226 dev_notice(&instance->pdev->dev, "waiting "
7227 "for controller reset to finish\n");
7228 }
7229
7230 msleep(1000);
7231 }
7232
7233 spin_lock_irqsave(&instance->hba_lock, flags);
7234 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7235 spin_unlock_irqrestore(&instance->hba_lock, flags);
7236
7237 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7238 error = -ENODEV;
7239 goto out_up;
7240 }
7241 spin_unlock_irqrestore(&instance->hba_lock, flags);
7242
7243 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
7244 out_up:
7245 up(&instance->ioctl_sem);
7246
7247 out_kfree_ioc:
7248 kfree(ioc);
7249 return error;
7250 }
7251
7252 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7253 {
7254 struct megasas_instance *instance;
7255 struct megasas_aen aen;
7256 int error;
7257 int i;
7258 unsigned long flags;
7259 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7260
7261 if (file->private_data != file) {
7262 printk(KERN_DEBUG "megasas: fasync_helper was not "
7263 "called first\n");
7264 return -EINVAL;
7265 }
7266
7267 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
7268 return -EFAULT;
7269
7270 instance = megasas_lookup_instance(aen.host_no);
7271
7272 if (!instance)
7273 return -ENODEV;
7274
7275 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7276 return -ENODEV;
7277 }
7278
7279 if (instance->unload == 1) {
7280 return -ENODEV;
7281 }
7282
7283 for (i = 0; i < wait_time; i++) {
7284
7285 spin_lock_irqsave(&instance->hba_lock, flags);
7286 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7287 spin_unlock_irqrestore(&instance->hba_lock,
7288 flags);
7289 break;
7290 }
7291
7292 spin_unlock_irqrestore(&instance->hba_lock, flags);
7293
7294 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
7295 dev_notice(&instance->pdev->dev, "waiting for "
7296 "controller reset to finish\n");
7297 }
7298
7299 msleep(1000);
7300 }
7301
7302 spin_lock_irqsave(&instance->hba_lock, flags);
7303 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7304 spin_unlock_irqrestore(&instance->hba_lock, flags);
7305 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7306 return -ENODEV;
7307 }
7308 spin_unlock_irqrestore(&instance->hba_lock, flags);
7309
7310 mutex_lock(&instance->reset_mutex);
7311 error = megasas_register_aen(instance, aen.seq_num,
7312 aen.class_locale_word);
7313 mutex_unlock(&instance->reset_mutex);
7314 return error;
7315 }
7316
7317 /**
7318 * megasas_mgmt_ioctl - char node ioctl entry point
7319 */
7320 static long
7321 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7322 {
7323 switch (cmd) {
7324 case MEGASAS_IOC_FIRMWARE:
7325 return megasas_mgmt_ioctl_fw(file, arg);
7326
7327 case MEGASAS_IOC_GET_AEN:
7328 return megasas_mgmt_ioctl_aen(file, arg);
7329 }
7330
7331 return -ENOTTY;
7332 }
7333
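/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * management application might drive this ioctl interface. The device
 * node name is an assumption (management tools create a node for the
 * dynamic major registered in megasas_init() below), and the frame
 * contents are elided:
 *
 *	struct megasas_iocpacket ioc = { 0 };
 *	int fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR);
 *
 *	ioc.host_no = 0;		// SCSI host number of the adapter
 *	// build an MFI frame in ioc.frame.raw, set ioc.sgl_off to the byte
 *	// offset of the SGL inside that frame, and describe each user data
 *	// buffer in ioc.sgl[] with ioc.sge_count entries ...
 *
 *	if (ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc) == 0)
 *		check(ioc.frame.hdr.cmd_status);	// updated by the driver
 *
 * AENs are consumed by first arming async notification on the fd (the
 * MEGASAS_IOC_GET_AEN handler requires a prior fasync registration) and
 * then waiting for events via poll() or SIGIO.
 */
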
7334 #ifdef CONFIG_COMPAT
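/*
 * 32-bit compat path: a struct megasas_iocpacket built by a 32-bit
 * application has a different layout because the iovec base pointers
 * (and the sense buffer pointer stored inside the frame) are 32 bits
 * wide. The handler below rebuilds a native-layout packet in
 * compat-allocated user space, widening each pointer with compat_ptr(),
 * and then reuses the regular megasas_mgmt_ioctl_fw() path.
 */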
7335 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7336 {
7337 struct compat_megasas_iocpacket __user *cioc =
7338 (struct compat_megasas_iocpacket __user *)arg;
7339 struct megasas_iocpacket __user *ioc =
7340 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
7341 int i;
7342 int error = 0;
7343 compat_uptr_t ptr;
7344 u32 local_sense_off;
7345 u32 local_sense_len;
7346 u32 user_sense_off;
7347
7348 if (clear_user(ioc, sizeof(*ioc)))
7349 return -EFAULT;
7350
7351 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
7352 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
7353 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
7354 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
7355 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
7356 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
7357 return -EFAULT;
7358
7359 /*
7360 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
7361 * sense_len is non-zero, so prepare the 64bit value under
7362 * the same condition.
7363 */
7364 if (get_user(local_sense_off, &ioc->sense_off) ||
7365 get_user(local_sense_len, &ioc->sense_len) ||
7366 get_user(user_sense_off, &cioc->sense_off))
7367 return -EFAULT;
7368
7369 if (local_sense_off != user_sense_off)
7370 return -EINVAL;
7371
7372 if (local_sense_len) {
7373 void __user **sense_ioc_ptr =
7374 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
7375 compat_uptr_t *sense_cioc_ptr =
7376 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
7377 if (get_user(ptr, sense_cioc_ptr) ||
7378 put_user(compat_ptr(ptr), sense_ioc_ptr))
7379 return -EFAULT;
7380 }
7381
7382 for (i = 0; i < MAX_IOCTL_SGE; i++) {
7383 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
7384 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
7385 copy_in_user(&ioc->sgl[i].iov_len,
7386 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
7387 return -EFAULT;
7388 }
7389
7390 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
7391
7392 if (copy_in_user(&cioc->frame.hdr.cmd_status,
7393 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
7394 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
7395 return -EFAULT;
7396 }
7397 return error;
7398 }
7399
7400 static long
7401 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
7402 unsigned long arg)
7403 {
7404 switch (cmd) {
7405 case MEGASAS_IOC_FIRMWARE32:
7406 return megasas_mgmt_compat_ioctl_fw(file, arg);
7407 case MEGASAS_IOC_GET_AEN:
7408 return megasas_mgmt_ioctl_aen(file, arg);
7409 }
7410
7411 return -ENOTTY;
7412 }
7413 #endif
7414
7415 /*
7416 * File operations structure for management interface
7417 */
7418 static const struct file_operations megasas_mgmt_fops = {
7419 .owner = THIS_MODULE,
7420 .open = megasas_mgmt_open,
7421 .fasync = megasas_mgmt_fasync,
7422 .unlocked_ioctl = megasas_mgmt_ioctl,
7423 .poll = megasas_mgmt_poll,
7424 #ifdef CONFIG_COMPAT
7425 .compat_ioctl = megasas_mgmt_compat_ioctl,
7426 #endif
7427 .llseek = noop_llseek,
7428 };
7429
7430 /*
7431 * PCI hotplug support registration structure
7432 */
7433 static struct pci_driver megasas_pci_driver = {
7434
7435 .name = "megaraid_sas",
7436 .id_table = megasas_pci_table,
7437 .probe = megasas_probe_one,
7438 .remove = megasas_detach_one,
7439 .suspend = megasas_suspend,
7440 .resume = megasas_resume,
7441 .shutdown = megasas_shutdown,
7442 };
7443
7444 /*
7445 * Sysfs driver attributes
7446 */
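/*
 * These attributes attach to the driver core object, so once the PCI
 * driver below is registered they appear under
 * /sys/bus/pci/drivers/megaraid_sas/. A quick shell sketch (assuming the
 * module is loaded):
 *
 *	cat /sys/bus/pci/drivers/megaraid_sas/version
 *	echo 1 > /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 */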
7447 static ssize_t version_show(struct device_driver *dd, char *buf)
7448 {
7449 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
7450 MEGASAS_VERSION);
7451 }
7452 static DRIVER_ATTR_RO(version);
7453
7454 static ssize_t release_date_show(struct device_driver *dd, char *buf)
7455 {
7456 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
7457 MEGASAS_RELDATE);
7458 }
7459 static DRIVER_ATTR_RO(release_date);
7460
7461 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
7462 {
7463 return sprintf(buf, "%u\n", support_poll_for_event);
7464 }
7465 static DRIVER_ATTR_RO(support_poll_for_event);
7466
7467 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
7468 {
7469 return sprintf(buf, "%u\n", support_device_change);
7470 }
7471 static DRIVER_ATTR_RO(support_device_change);
7472
7473 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
7474 {
7475 return sprintf(buf, "%u\n", megasas_dbg_lvl);
7476 }
7477
7478 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
7479 size_t count)
7480 {
7481 int retval = count;
7482
7483 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
7484 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
7485 retval = -EINVAL;
7486 }
7487 return retval;
7488 }
7489 static DRIVER_ATTR_RW(dbg_lvl);
7490
7491 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
7492 {
7493 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
7494 scsi_remove_device(sdev);
7495 scsi_device_put(sdev);
7496 }
7497
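/*
 * megasas_aen_polling - deferred AEN event handler
 *
 * Runs from the event workqueue once an AEN completes. It decodes the
 * event, refreshes the PD/LD lists where relevant, hot-adds or removes
 * scsi_devices on the PD and VD channels accordingly, and finally
 * re-registers an AEN with the firmware for the next sequence number.
 */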
7498 static void
7499 megasas_aen_polling(struct work_struct *work)
7500 {
7501 struct megasas_aen_event *ev =
7502 container_of(work, struct megasas_aen_event, hotplug_work.work);
7503 struct megasas_instance *instance = ev->instance;
7504 union megasas_evt_class_locale class_locale;
7505 struct Scsi_Host *host;
7506 struct scsi_device *sdev1;
7507 u16 pd_index = 0;
7508 u16 ld_index = 0;
7509 int i, j, doscan = 0;
7510 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7511 int error;
7512 u8 dcmd_ret = DCMD_SUCCESS;
7513
7514 if (!instance) {
7515 printk(KERN_ERR "invalid instance!\n");
7516 kfree(ev);
7517 return;
7518 }
7519
7520 /* Adjust event workqueue thread wait time for VF mode */
7521 if (instance->requestorId)
7522 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7523
7524 /* Don't run the event workqueue thread if OCR is running */
7525 mutex_lock(&instance->reset_mutex);
7526
7527 instance->ev = NULL;
7528 host = instance->host;
7529 if (instance->evt_detail) {
7530 megasas_decode_evt(instance);
7531
7532 switch (le32_to_cpu(instance->evt_detail->code)) {
7533
7534 case MR_EVT_PD_INSERTED:
7535 case MR_EVT_PD_REMOVED:
7536 dcmd_ret = megasas_get_pd_list(instance);
7537 if (dcmd_ret == DCMD_SUCCESS)
7538 doscan = SCAN_PD_CHANNEL;
7539 break;
7540
7541 case MR_EVT_LD_OFFLINE:
7542 case MR_EVT_CFG_CLEARED:
7543 case MR_EVT_LD_DELETED:
7544 case MR_EVT_LD_CREATED:
7545 if (!instance->requestorId ||
7546 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7547 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7548
7549 if (dcmd_ret == DCMD_SUCCESS)
7550 doscan = SCAN_VD_CHANNEL;
7551
7552 break;
7553
7554 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7555 case MR_EVT_FOREIGN_CFG_IMPORTED:
7556 case MR_EVT_LD_STATE_CHANGE:
7557 dcmd_ret = megasas_get_pd_list(instance);
7558
7559 if (dcmd_ret != DCMD_SUCCESS)
7560 break;
7561
7562 if (!instance->requestorId ||
7563 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7564 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7565
7566 if (dcmd_ret != DCMD_SUCCESS)
7567 break;
7568
7569 doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7570 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7571 instance->host->host_no);
7572 break;
7573
7574 case MR_EVT_CTRL_PROP_CHANGED:
7575 dcmd_ret = megasas_get_ctrl_info(instance);
7576 break;
7577 default:
7578 doscan = 0;
7579 break;
7580 }
7581 } else {
7582 dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7583 mutex_unlock(&instance->reset_mutex);
7584 kfree(ev);
7585 return;
7586 }
7587
7588 mutex_unlock(&instance->reset_mutex);
7589
7590 if (doscan & SCAN_PD_CHANNEL) {
7591 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7592 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7593 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7594 sdev1 = scsi_device_lookup(host, i, j, 0);
7595 if (instance->pd_list[pd_index].driveState ==
7596 MR_PD_STATE_SYSTEM) {
7597 if (!sdev1)
7598 scsi_add_device(host, i, j, 0);
7599 else
7600 scsi_device_put(sdev1);
7601 } else {
7602 if (sdev1)
7603 megasas_remove_scsi_device(sdev1);
7604 }
7605 }
7606 }
7607 }
7608
7609 if (doscan & SCAN_VD_CHANNEL) {
7610 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7611 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7612 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7613 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7614 if (instance->ld_ids[ld_index] != 0xff) {
7615 if (!sdev1)
7616 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7617 else
7618 scsi_device_put(sdev1);
7619 } else {
7620 if (sdev1)
7621 megasas_remove_scsi_device(sdev1);
7622 }
7623 }
7624 }
7625 }
7626
7627 if (dcmd_ret == DCMD_SUCCESS)
7628 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7629 else
7630 seq_num = instance->last_seq_num;
7631
7632 /* Register AEN with FW for latest sequence number plus 1 */
7633 class_locale.members.reserved = 0;
7634 class_locale.members.locale = MR_EVT_LOCALE_ALL;
7635 class_locale.members.class = MR_EVT_CLASS_DEBUG;
7636
7637 if (instance->aen_cmd != NULL) {
7638 kfree(ev);
7639 return;
7640 }
7641
7642 mutex_lock(&instance->reset_mutex);
7643 error = megasas_register_aen(instance, seq_num,
7644 class_locale.word);
7645 if (error)
7646 dev_err(&instance->pdev->dev,
7647 "register aen failed error %x\n", error);
7648
7649 mutex_unlock(&instance->reset_mutex);
7650 kfree(ev);
7651 }
7652
7653 /**
7654 * megasas_init - Driver load entry point
7655 */
7656 static int __init megasas_init(void)
7657 {
7658 int rval;
7659
7660 /*
7661 * Booted in a kdump kernel, so minimize the memory footprint by
7662 * disabling a few features
7663 */
7664 if (reset_devices) {
7665 msix_vectors = 1;
7666 rdpq_enable = 0;
7667 dual_qdepth_disable = 1;
7668 }
7669
7670 /*
7671 * Announce driver version and other information
7672 */
7673 pr_info("megasas: %s\n", MEGASAS_VERSION);
7674
7675 spin_lock_init(&poll_aen_lock);
7676
7677 support_poll_for_event = 2;
7678 support_device_change = 1;
7679
7680 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7681
7682 /*
7683 * Register character device node
7684 */
7685 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7686
7687 if (rval < 0) {
7688 printk(KERN_DEBUG "megasas: failed to register device node\n");
7689 return rval;
7690 }
7691
7692 megasas_mgmt_majorno = rval;
7693
7694 /*
7695 * Register ourselves as PCI hotplug module
7696 */
7697 rval = pci_register_driver(&megasas_pci_driver);
7698
7699 if (rval) {
7700 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
7701 goto err_pcidrv;
7702 }
7703
7704 rval = driver_create_file(&megasas_pci_driver.driver,
7705 &driver_attr_version);
7706 if (rval)
7707 goto err_dcf_attr_ver;
7708
7709 rval = driver_create_file(&megasas_pci_driver.driver,
7710 &driver_attr_release_date);
7711 if (rval)
7712 goto err_dcf_rel_date;
7713
7714 rval = driver_create_file(&megasas_pci_driver.driver,
7715 &driver_attr_support_poll_for_event);
7716 if (rval)
7717 goto err_dcf_support_poll_for_event;
7718
7719 rval = driver_create_file(&megasas_pci_driver.driver,
7720 &driver_attr_dbg_lvl);
7721 if (rval)
7722 goto err_dcf_dbg_lvl;
7723 rval = driver_create_file(&megasas_pci_driver.driver,
7724 &driver_attr_support_device_change);
7725 if (rval)
7726 goto err_dcf_support_device_change;
7727
7728 return rval;
7729
7730 err_dcf_support_device_change:
7731 driver_remove_file(&megasas_pci_driver.driver,
7732 &driver_attr_dbg_lvl);
7733 err_dcf_dbg_lvl:
7734 driver_remove_file(&megasas_pci_driver.driver,
7735 &driver_attr_support_poll_for_event);
7736 err_dcf_support_poll_for_event:
7737 driver_remove_file(&megasas_pci_driver.driver,
7738 &driver_attr_release_date);
7739 err_dcf_rel_date:
7740 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7741 err_dcf_attr_ver:
7742 pci_unregister_driver(&megasas_pci_driver);
7743 err_pcidrv:
7744 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7745 return rval;
7746 }
7747
7748 /**
7749 * megasas_exit - Driver unload entry point
7750 */
7751 static void __exit megasas_exit(void)
7752 {
7753 driver_remove_file(&megasas_pci_driver.driver,
7754 &driver_attr_dbg_lvl);
7755 driver_remove_file(&megasas_pci_driver.driver,
7756 &driver_attr_support_poll_for_event);
7757 driver_remove_file(&megasas_pci_driver.driver,
7758 &driver_attr_support_device_change);
7759 driver_remove_file(&megasas_pci_driver.driver,
7760 &driver_attr_release_date);
7761 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7762
7763 pci_unregister_driver(&megasas_pci_driver);
7764 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7765 }
7766
7767 module_init(megasas_init);
7768 module_exit(megasas_exit);
7769