1 /*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * FILE: megaraid_sas_fusion.c
21 *
22 * Authors: Avago Technologies
23 * Sumant Patro
24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
27 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/uaccess.h>
45 #include <linux/fs.h>
46 #include <linux/compat.h>
47 #include <linux/blkdev.h>
48 #include <linux/mutex.h>
49 #include <linux/poll.h>
50
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsi_dbg.h>
56 #include <linux/dmi.h>
57
58 #include "megaraid_sas_fusion.h"
59 #include "megaraid_sas.h"
60
61
62 extern void megasas_free_cmds(struct megasas_instance *instance);
63 extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
64 *instance);
65 extern void
66 megasas_complete_cmd(struct megasas_instance *instance,
67 struct megasas_cmd *cmd, u8 alt_status);
68 int
69 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
70 int seconds);
71
72 void
73 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
74 int megasas_alloc_cmds(struct megasas_instance *instance);
75 int
76 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
77 int
78 megasas_issue_polled(struct megasas_instance *instance,
79 struct megasas_cmd *cmd);
80 void
81 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
82
83 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
84 void megaraid_sas_kill_hba(struct megasas_instance *instance);
85
86 extern u32 megasas_dbg_lvl;
87 void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
88 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
89 int initial);
90 void megasas_start_timer(struct megasas_instance *instance,
91 struct timer_list *timer,
92 void *fn, unsigned long interval);
93 extern struct megasas_mgmt_info megasas_mgmt_info;
94 extern int resetwaittime;
95
96
97
98 /**
99 * megasas_enable_intr_fusion - Enables interrupts
100  * @instance: Adapter soft state
101 */
102 void
103 megasas_enable_intr_fusion(struct megasas_instance *instance)
104 {
105 struct megasas_register_set __iomem *regs;
106 regs = instance->reg_set;
107
108 instance->mask_interrupts = 0;
109 /* For Thunderbolt/Invader also clear intr on enable */
110 writel(~0, &regs->outbound_intr_status);
111 readl(&regs->outbound_intr_status);
112
113 writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
114
115 /* Dummy readl to force pci flush */
116 readl(&regs->outbound_intr_mask);
117 }
118
119 /**
120  * megasas_disable_intr_fusion - Disables interrupts
121  * @instance: Adapter soft state
122 */
123 void
124 megasas_disable_intr_fusion(struct megasas_instance *instance)
125 {
126 u32 mask = 0xFFFFFFFF;
127 u32 status;
128 struct megasas_register_set __iomem *regs;
129 regs = instance->reg_set;
130 instance->mask_interrupts = 1;
131
132 writel(mask, &regs->outbound_intr_mask);
133 /* Dummy readl to force pci flush */
134 status = readl(&regs->outbound_intr_mask);
135 }
136
137 int
138 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
139 {
140 u32 status;
141 /*
142 * Check if it is our interrupt
143 */
144 status = readl(&regs->outbound_intr_status);
145
146 if (status & 1) {
147 writel(status, &regs->outbound_intr_status);
148 readl(&regs->outbound_intr_status);
149 return 1;
150 }
151 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
152 return 0;
153
154 return 1;
155 }
156
157 /**
158 * megasas_get_cmd_fusion - Get a command from the free pool
159 * @instance: Adapter soft state
160 *
161 * Returns a blk_tag indexed mpt frame
162 */
163 inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
164 *instance, u32 blk_tag)
165 {
166 struct fusion_context *fusion;
167
168 fusion = instance->ctrl_context;
169 return fusion->cmd_list[blk_tag];
170 }
171
172 /**
173 * megasas_return_cmd_fusion - Return a cmd to free command pool
174 * @instance: Adapter soft state
175 * @cmd: Command packet to be returned to free command pool
176 */
177 inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
178 struct megasas_cmd_fusion *cmd)
179 {
180 cmd->scmd = NULL;
181 memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
182 }
183
184 /**
185 * megasas_fire_cmd_fusion - Sends command to the FW
186 */
187 static void
188 megasas_fire_cmd_fusion(struct megasas_instance *instance,
189 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
190 {
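	/*
	 * On 64-bit builds with writeq() available, the whole request
	 * descriptor is posted in one atomic 64-bit MMIO write. The 32-bit
	 * fallback below must hold hba_lock so the low/high dword writes of
	 * concurrent submitters cannot interleave on the queue ports.
	 */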
191 #if defined(writeq) && defined(CONFIG_64BIT)
192 u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
193 le32_to_cpu(req_desc->u.low));
194
195 writeq(req_data, &instance->reg_set->inbound_low_queue_port);
196 #else
197 unsigned long flags;
198
199 spin_lock_irqsave(&instance->hba_lock, flags);
200 writel(le32_to_cpu(req_desc->u.low),
201 &instance->reg_set->inbound_low_queue_port);
202 writel(le32_to_cpu(req_desc->u.high),
203 &instance->reg_set->inbound_high_queue_port);
204 mmiowb();
205 spin_unlock_irqrestore(&instance->hba_lock, flags);
206 #endif
207 }
208
209
210 /**
211 * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
212 * @instance: Adapter soft state
213 */
214 static void megasas_teardown_frame_pool_fusion(
215 struct megasas_instance *instance)
216 {
217 int i;
218 struct fusion_context *fusion = instance->ctrl_context;
219
220 u16 max_cmd = instance->max_fw_cmds;
221
222 struct megasas_cmd_fusion *cmd;
223
224 if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
225 dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, "
226 "sense pool : %p\n", fusion->sg_dma_pool,
227 fusion->sense_dma_pool);
228 return;
229 }
230
231 /*
232 * Return all frames to pool
233 */
234 for (i = 0; i < max_cmd; i++) {
235
236 cmd = fusion->cmd_list[i];
237
238 if (cmd->sg_frame)
239 pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
240 cmd->sg_frame_phys_addr);
241
242 if (cmd->sense)
243 pci_pool_free(fusion->sense_dma_pool, cmd->sense,
244 cmd->sense_phys_addr);
245 }
246
247 /*
248 * Now destroy the pool itself
249 */
250 pci_pool_destroy(fusion->sg_dma_pool);
251 pci_pool_destroy(fusion->sense_dma_pool);
252
253 fusion->sg_dma_pool = NULL;
254 fusion->sense_dma_pool = NULL;
255 }
256
257 /**
258 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
259 * @instance: Adapter soft state
260 */
261 void
262 megasas_free_cmds_fusion(struct megasas_instance *instance)
263 {
264 int i;
265 struct fusion_context *fusion = instance->ctrl_context;
266
267 u32 max_cmds, req_sz, reply_sz, io_frames_sz;
268
269
270 req_sz = fusion->request_alloc_sz;
271 reply_sz = fusion->reply_alloc_sz;
272 io_frames_sz = fusion->io_frames_alloc_sz;
273
274 max_cmds = instance->max_fw_cmds;
275
276 /* Free descriptors and request Frames memory */
277 if (fusion->req_frames_desc)
278 dma_free_coherent(&instance->pdev->dev, req_sz,
279 fusion->req_frames_desc,
280 fusion->req_frames_desc_phys);
281
282 if (fusion->reply_frames_desc) {
283 pci_pool_free(fusion->reply_frames_desc_pool,
284 fusion->reply_frames_desc,
285 fusion->reply_frames_desc_phys);
286 pci_pool_destroy(fusion->reply_frames_desc_pool);
287 }
288
289 if (fusion->io_request_frames) {
290 pci_pool_free(fusion->io_request_frames_pool,
291 fusion->io_request_frames,
292 fusion->io_request_frames_phys);
293 pci_pool_destroy(fusion->io_request_frames_pool);
294 }
295
296 /* Free the Fusion frame pool */
297 megasas_teardown_frame_pool_fusion(instance);
298
299 /* Free all the commands in the cmd_list */
300 for (i = 0; i < max_cmds; i++)
301 kfree(fusion->cmd_list[i]);
302
303 /* Free the cmd_list buffer itself */
304 kfree(fusion->cmd_list);
305 fusion->cmd_list = NULL;
306
307 }
308
309 /**
310 * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames
311 * @instance: Adapter soft state
312 *
313 */
314 static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
315 {
316 int i;
317 u32 max_cmd;
318 struct fusion_context *fusion;
319 struct megasas_cmd_fusion *cmd;
320
321 fusion = instance->ctrl_context;
322 max_cmd = instance->max_fw_cmds;
323
324
325 /*
326 * Use DMA pool facility provided by PCI layer
327 */
328
329 fusion->sg_dma_pool = pci_pool_create("sg_pool_fusion", instance->pdev,
330 instance->max_chain_frame_sz,
331 4, 0);
332 if (!fusion->sg_dma_pool) {
333 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n");
334 return -ENOMEM;
335 }
336 fusion->sense_dma_pool = pci_pool_create("sense pool fusion",
337 instance->pdev,
338 SCSI_SENSE_BUFFERSIZE, 64, 0);
339
340 if (!fusion->sense_dma_pool) {
341 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n");
342 pci_pool_destroy(fusion->sg_dma_pool);
343 fusion->sg_dma_pool = NULL;
344 return -ENOMEM;
345 }
346
347 /*
348 * Allocate and attach a frame to each of the commands in cmd_list
349 */
350 for (i = 0; i < max_cmd; i++) {
351
352 cmd = fusion->cmd_list[i];
353
354 cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
355 GFP_KERNEL,
356 &cmd->sg_frame_phys_addr);
357
358 cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
359 GFP_KERNEL, &cmd->sense_phys_addr);
360 /*
361 * megasas_teardown_frame_pool_fusion() takes care of freeing
362 * whatever has been allocated
363 */
364 if (!cmd->sg_frame || !cmd->sense) {
365 dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
366 megasas_teardown_frame_pool_fusion(instance);
367 return -ENOMEM;
368 }
369 }
370 return 0;
371 }
372
373 /**
374 * megasas_alloc_cmds_fusion - Allocates the command packets
375 * @instance: Adapter soft state
376 *
377 *
378 * Each frame has a 32-bit field called context. This context is used to get
379  * back the megasas_cmd_fusion from the frame when a frame gets completed.
380 * In this driver, the 32 bit values are the indices into an array cmd_list.
381 * This array is used only to look up the megasas_cmd_fusion given the context.
382 * The free commands themselves are maintained in a linked list called cmd_pool.
383 *
384 * cmds are formed in the io_request and sg_frame members of the
385 * megasas_cmd_fusion. The context field is used to get a request descriptor
386 * and is used as SMID of the cmd.
387 * SMID value range is from 1 to max_fw_cmds.
388 */
389 int
390 megasas_alloc_cmds_fusion(struct megasas_instance *instance)
391 {
392 int i, j, count;
393 u32 max_cmd, io_frames_sz;
394 struct fusion_context *fusion;
395 struct megasas_cmd_fusion *cmd;
396 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
397 u32 offset;
398 dma_addr_t io_req_base_phys;
399 u8 *io_req_base;
400
401 fusion = instance->ctrl_context;
402
403 max_cmd = instance->max_fw_cmds;
404
405 fusion->req_frames_desc =
406 dma_alloc_coherent(&instance->pdev->dev,
407 fusion->request_alloc_sz,
408 &fusion->req_frames_desc_phys, GFP_KERNEL);
409
410 if (!fusion->req_frames_desc) {
411 dev_err(&instance->pdev->dev, "Could not allocate memory for "
412 "request_frames\n");
413 goto fail_req_desc;
414 }
415
416 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
417 fusion->reply_frames_desc_pool =
418 pci_pool_create("reply_frames pool", instance->pdev,
419 fusion->reply_alloc_sz * count, 16, 0);
420
421 if (!fusion->reply_frames_desc_pool) {
422 dev_err(&instance->pdev->dev, "Could not allocate memory for "
423 "reply_frame pool\n");
424 goto fail_reply_desc;
425 }
426
427 fusion->reply_frames_desc =
428 pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
429 &fusion->reply_frames_desc_phys);
430 if (!fusion->reply_frames_desc) {
431 dev_err(&instance->pdev->dev, "Could not allocate memory for "
432 "reply_frame pool\n");
433 pci_pool_destroy(fusion->reply_frames_desc_pool);
434 goto fail_reply_desc;
435 }
436
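	/*
	 * Pre-fill every reply descriptor with all ones; the completion
	 * handler treats an all-ones descriptor as "unused", so this
	 * effectively marks the reply queues as empty before the firmware
	 * posts anything.
	 */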
437 reply_desc = fusion->reply_frames_desc;
438 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
439 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
440
441 io_frames_sz = fusion->io_frames_alloc_sz;
442
443 fusion->io_request_frames_pool =
444 pci_pool_create("io_request_frames pool", instance->pdev,
445 fusion->io_frames_alloc_sz, 16, 0);
446
447 if (!fusion->io_request_frames_pool) {
448 dev_err(&instance->pdev->dev, "Could not allocate memory for "
449 "io_request_frame pool\n");
450 goto fail_io_frames;
451 }
452
453 fusion->io_request_frames =
454 pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
455 &fusion->io_request_frames_phys);
456 if (!fusion->io_request_frames) {
457 dev_err(&instance->pdev->dev, "Could not allocate memory for "
458 "io_request_frames\n");
459 pci_pool_destroy(fusion->io_request_frames_pool);
460 goto fail_io_frames;
461 }
462
463 /*
464 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
465 * Allocate the dynamic array first and then allocate individual
466 * commands.
467 */
468 fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
469 * max_cmd, GFP_KERNEL);
470
471 if (!fusion->cmd_list) {
472 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc "
473 "memory for cmd_list_fusion\n");
474 goto fail_cmd_list;
475 }
476
477 max_cmd = instance->max_fw_cmds;
478 for (i = 0; i < max_cmd; i++) {
479 fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
480 GFP_KERNEL);
481 if (!fusion->cmd_list[i]) {
482 dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n");
483
484 for (j = 0; j < i; j++)
485 kfree(fusion->cmd_list[j]);
486
487 kfree(fusion->cmd_list);
488 fusion->cmd_list = NULL;
489 goto fail_cmd_list;
490 }
491 }
492
493 /* The first 256 bytes (SMID 0) is not used. Don't add to cmd list */
494 io_req_base = fusion->io_request_frames +
495 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
496 io_req_base_phys = fusion->io_request_frames_phys +
497 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
498
499 /*
500 * Add all the commands to command pool (fusion->cmd_pool)
501 */
502
503 /* SMID 0 is reserved. Set SMID/index from 1 */
504 for (i = 0; i < max_cmd; i++) {
505 cmd = fusion->cmd_list[i];
506 offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
507 memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
508 cmd->index = i + 1;
509 cmd->scmd = NULL;
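		/*
		 * Slots at the tail of the pool (index >= max_scsi_cmds) are
		 * reserved for internal MFI pass-through commands; only those
		 * get a valid sync_cmd_idx, the rest are marked invalid.
		 */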
510 cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
511 (i - instance->max_scsi_cmds) :
512 (u32)ULONG_MAX; /* Set to Invalid */
513 cmd->instance = instance;
514 cmd->io_request =
515 (struct MPI2_RAID_SCSI_IO_REQUEST *)
516 (io_req_base + offset);
517 memset(cmd->io_request, 0,
518 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
519 cmd->io_request_phys_addr = io_req_base_phys + offset;
520 }
521
522 /*
523 * Create a frame pool and assign one frame to each cmd
524 */
525 if (megasas_create_frame_pool_fusion(instance)) {
526 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
527 megasas_free_cmds_fusion(instance);
528 goto fail_req_desc;
529 }
530
531 return 0;
532
533 fail_cmd_list:
534 pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
535 fusion->io_request_frames_phys);
536 pci_pool_destroy(fusion->io_request_frames_pool);
537 fail_io_frames:
538 dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
539 fusion->reply_frames_desc,
540 fusion->reply_frames_desc_phys);
541 pci_pool_free(fusion->reply_frames_desc_pool,
542 fusion->reply_frames_desc,
543 fusion->reply_frames_desc_phys);
544 pci_pool_destroy(fusion->reply_frames_desc_pool);
545
546 fail_reply_desc:
547 dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
548 fusion->req_frames_desc,
549 fusion->req_frames_desc_phys);
550 fail_req_desc:
551 return -ENOMEM;
552 }
553
554 /**
555 * wait_and_poll - Issues a polling command
556 * @instance: Adapter soft state
557 * @cmd: Command packet to be issued
558 *
559 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
560 */
561 int
562 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
563 int seconds)
564 {
565 int i;
566 struct megasas_header *frame_hdr = &cmd->frame->hdr;
567 struct fusion_context *fusion;
568
569 u32 msecs = seconds * 1000;
570
571 fusion = instance->ctrl_context;
572 /*
573 * Wait for cmd_status to change
574 */
575 for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
576 rmb();
577 msleep(20);
578 }
579
580 if (frame_hdr->cmd_status == 0xff)
581 return -ETIME;
582
583 return (frame_hdr->cmd_status == MFI_STAT_OK) ?
584 0 : 1;
585 }
586
587 /**
588 * megasas_ioc_init_fusion - Initializes the FW
589 * @instance: Adapter soft state
590 *
591 * Issues the IOC Init cmd
592 */
593 int
594 megasas_ioc_init_fusion(struct megasas_instance *instance)
595 {
596 struct megasas_init_frame *init_frame;
597 struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
598 dma_addr_t ioc_init_handle;
599 struct megasas_cmd *cmd;
600 u8 ret;
601 struct fusion_context *fusion;
602 union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
603 int i;
604 struct megasas_header *frame_hdr;
605 const char *sys_info;
606 MFI_CAPABILITIES *drv_ops;
607
608 fusion = instance->ctrl_context;
609
610 cmd = megasas_get_cmd(instance);
611
612 if (!cmd) {
613 dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
614 ret = 1;
615 goto fail_get_cmd;
616 }
617
618 IOCInitMessage =
619 dma_alloc_coherent(&instance->pdev->dev,
620 sizeof(struct MPI2_IOC_INIT_REQUEST),
621 &ioc_init_handle, GFP_KERNEL);
622
623 if (!IOCInitMessage) {
624 dev_err(&instance->pdev->dev, "Could not allocate memory for "
625 "IOCInitMessage\n");
626 ret = 1;
627 goto fail_fw_init;
628 }
629
630 memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
631
632 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
633 IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
634 IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
635 IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
636 IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
637
638 IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
639 IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
640 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
641 IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
642 init_frame = (struct megasas_init_frame *)cmd->frame;
643 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
644
645 frame_hdr = &cmd->frame->hdr;
646 frame_hdr->cmd_status = 0xFF;
647 frame_hdr->flags = cpu_to_le16(
648 le16_to_cpu(frame_hdr->flags) |
649 MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
650
651 init_frame->cmd = MFI_CMD_INIT;
652 init_frame->cmd_status = 0xFF;
653
654 drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
655
656 /* driver support Extended MSIX */
657 if (fusion->adapter_type == INVADER_SERIES)
658 drv_ops->mfi_capabilities.support_additional_msix = 1;
659 /* driver supports HA / Remote LUN over Fast Path interface */
660 drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
661
662 drv_ops->mfi_capabilities.support_max_255lds = 1;
663 drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
664 drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;
665
666 if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
667 drv_ops->mfi_capabilities.support_ext_io_size = 1;
668
669 /* Convert capability to LE32 */
670 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
671
672 sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
673 if (instance->system_info_buf && sys_info) {
674 memcpy(instance->system_info_buf->systemId, sys_info,
675 strlen(sys_info) > 64 ? 64 : strlen(sys_info));
676 instance->system_info_buf->systemIdLength =
677 strlen(sys_info) > 64 ? 64 : strlen(sys_info);
678 init_frame->system_info_lo = instance->system_info_h;
679 init_frame->system_info_hi = 0;
680 }
681
682 init_frame->queue_info_new_phys_addr_hi =
683 cpu_to_le32(upper_32_bits(ioc_init_handle));
684 init_frame->queue_info_new_phys_addr_lo =
685 cpu_to_le32(lower_32_bits(ioc_init_handle));
686 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
687
688 req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
689 req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
690 req_desc.MFAIo.RequestFlags =
691 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
692 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
693
694 /*
695 * disable the intr before firing the init frame
696 */
697 instance->instancet->disable_intr(instance);
698
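	/* Wait up to ~10 seconds for the firmware doorbell to clear before posting IOC INIT. */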
699 for (i = 0; i < (10 * 1000); i += 20) {
700 if (readl(&instance->reg_set->doorbell) & 1)
701 msleep(20);
702 else
703 break;
704 }
705
706 megasas_fire_cmd_fusion(instance, &req_desc);
707
708 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
709
710 frame_hdr = &cmd->frame->hdr;
711 if (frame_hdr->cmd_status != 0) {
712 ret = 1;
713 goto fail_fw_init;
714 }
715 dev_err(&instance->pdev->dev, "Init cmd success\n");
716
717 ret = 0;
718
719 fail_fw_init:
720 megasas_return_cmd(instance, cmd);
721 if (IOCInitMessage)
722 dma_free_coherent(&instance->pdev->dev,
723 sizeof(struct MPI2_IOC_INIT_REQUEST),
724 IOCInitMessage, ioc_init_handle);
725 fail_get_cmd:
726 return ret;
727 }
728
729 /**
730 * megasas_sync_pd_seq_num - JBOD SEQ MAP
731 * @instance: Adapter soft state
732 * @pend: set to 1, if it is pended jbod map.
733 *
734  * Issue the JBOD map sync command to the firmware. If it is a pended
735  * command, just issue it and return. If it is the first JBOD map
736  * request, issue the command and wait for its completion.
737 */
738 int
739 megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
740 int ret = 0;
741 u32 pd_seq_map_sz;
742 struct megasas_cmd *cmd;
743 struct megasas_dcmd_frame *dcmd;
744 struct fusion_context *fusion = instance->ctrl_context;
745 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
746 dma_addr_t pd_seq_h;
747
748 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
749 pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
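	/*
	 * MR_PD_CFG_SEQ_NUM_SYNC already embeds one MR_PD_CFG_SEQ, so only
	 * MAX_PHYSICAL_DEVICES - 1 additional entries are accounted for below.
	 */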
750 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
751 (sizeof(struct MR_PD_CFG_SEQ) *
752 (MAX_PHYSICAL_DEVICES - 1));
753
754 cmd = megasas_get_cmd(instance);
755 if (!cmd) {
756 dev_err(&instance->pdev->dev,
757 "Could not get mfi cmd. Fail from %s %d\n",
758 __func__, __LINE__);
759 return -ENOMEM;
760 }
761
762 dcmd = &cmd->frame->dcmd;
763
764 memset(pd_sync, 0, pd_seq_map_sz);
765 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
766 dcmd->cmd = MFI_CMD_DCMD;
767 dcmd->cmd_status = 0xFF;
768 dcmd->sge_count = 1;
769 dcmd->timeout = 0;
770 dcmd->pad_0 = 0;
771 dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
772 dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
773 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
774 dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
775
776 if (pend) {
777 dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
778 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
779 instance->jbod_seq_cmd = cmd;
780 instance->instancet->issue_dcmd(instance, cmd);
781 return 0;
782 }
783
784 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
785
786 /* Below code is only for non pended DCMD */
787 if (instance->ctrl_context && !instance->mask_interrupts)
788 ret = megasas_issue_blocked_cmd(instance, cmd, 60);
789 else
790 ret = megasas_issue_polled(instance, cmd);
791
792 if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
793 dev_warn(&instance->pdev->dev,
794 "driver supports max %d JBOD, but FW reports %d\n",
795 MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
796 ret = -EINVAL;
797 }
798
799 if (!ret)
800 instance->pd_seq_map_id++;
801
802 megasas_return_cmd(instance, cmd);
803 return ret;
804 }
805
806 /*
807  * megasas_get_ld_map_info - Returns FW's ld_map structure
808  * @instance: Adapter soft state
809  *
810  * Issues an internal command (DCMD) to get the FW's logical drive (LD)
811  * map structure. This information is mainly used to validate the RAID map
812  * and decide whether I/Os can be sent via the fast path.
813 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
814 * dcmd.mbox.b[0] - number of LDs being sync'd
815 * dcmd.mbox.b[1] - 0 - complete command immediately.
816 * - 1 - pend till config change
817 * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
818 * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
819 * uses extended struct MR_FW_RAID_MAP_EXT
820 */
821 static int
822 megasas_get_ld_map_info(struct megasas_instance *instance)
823 {
824 int ret = 0;
825 struct megasas_cmd *cmd;
826 struct megasas_dcmd_frame *dcmd;
827 void *ci;
828 dma_addr_t ci_h = 0;
829 u32 size_map_info;
830 struct fusion_context *fusion;
831
832 cmd = megasas_get_cmd(instance);
833
834 if (!cmd) {
835 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
836 return -ENOMEM;
837 }
838
839 fusion = instance->ctrl_context;
840
841 if (!fusion) {
842 megasas_return_cmd(instance, cmd);
843 return -ENXIO;
844 }
845
846 dcmd = &cmd->frame->dcmd;
847
848 size_map_info = fusion->current_map_sz;
849
850 ci = (void *) fusion->ld_map[(instance->map_id & 1)];
851 ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
852
853 if (!ci) {
854 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
855 megasas_return_cmd(instance, cmd);
856 return -ENOMEM;
857 }
858
859 memset(ci, 0, fusion->max_map_sz);
860 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
861 #if VD_EXT_DEBUG
862 dev_dbg(&instance->pdev->dev,
863 "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
864 __func__, cpu_to_le32(size_map_info));
865 #endif
866 dcmd->cmd = MFI_CMD_DCMD;
867 dcmd->cmd_status = 0xFF;
868 dcmd->sge_count = 1;
869 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
870 dcmd->timeout = 0;
871 dcmd->pad_0 = 0;
872 dcmd->data_xfer_len = cpu_to_le32(size_map_info);
873 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
874 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
875 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
876
877 if (instance->ctrl_context && !instance->mask_interrupts)
878 ret = megasas_issue_blocked_cmd(instance, cmd,
879 MEGASAS_BLOCKED_CMD_TIMEOUT);
880 else
881 ret = megasas_issue_polled(instance, cmd);
882
883 megasas_return_cmd(instance, cmd);
884
885 return ret;
886 }
887
888 u8
889 megasas_get_map_info(struct megasas_instance *instance)
890 {
891 struct fusion_context *fusion = instance->ctrl_context;
892
893 fusion->fast_path_io = 0;
894 if (!megasas_get_ld_map_info(instance)) {
895 if (MR_ValidateMapInfo(instance)) {
896 fusion->fast_path_io = 1;
897 return 0;
898 }
899 }
900 return 1;
901 }
902
903 /*
904  * megasas_sync_map_info - Sends LD target sync info to the FW
905  * @instance: Adapter soft state
906  *
907  * Issues a pended internal command (DCMD) carrying the per-LD sequence
908  * numbers. The firmware completes it when the RAID map changes, which
909  * tells the driver to fetch a fresh map.
910 */
911 int
912 megasas_sync_map_info(struct megasas_instance *instance)
913 {
914 int ret = 0, i;
915 struct megasas_cmd *cmd;
916 struct megasas_dcmd_frame *dcmd;
917 u32 size_sync_info, num_lds;
918 struct fusion_context *fusion;
919 struct MR_LD_TARGET_SYNC *ci = NULL;
920 struct MR_DRV_RAID_MAP_ALL *map;
921 struct MR_LD_RAID *raid;
922 struct MR_LD_TARGET_SYNC *ld_sync;
923 dma_addr_t ci_h = 0;
924 u32 size_map_info;
925
926 cmd = megasas_get_cmd(instance);
927
928 if (!cmd) {
929 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
930 return -ENOMEM;
931 }
932
933 fusion = instance->ctrl_context;
934
935 if (!fusion) {
936 megasas_return_cmd(instance, cmd);
937 return 1;
938 }
939
940 map = fusion->ld_drv_map[instance->map_id & 1];
941
942 num_lds = le16_to_cpu(map->raidMap.ldCount);
943
944 dcmd = &cmd->frame->dcmd;
945
946 size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
947
948 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
949
950 ci = (struct MR_LD_TARGET_SYNC *)
951 fusion->ld_map[(instance->map_id - 1) & 1];
952 memset(ci, 0, fusion->max_map_sz);
953
954 ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
955
956 ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
957
958 for (i = 0; i < num_lds; i++, ld_sync++) {
959 raid = MR_LdRaidGet(i, map);
960 ld_sync->targetId = MR_GetLDTgtId(i, map);
961 ld_sync->seqNum = raid->seqNum;
962 }
963
964 size_map_info = fusion->current_map_sz;
965
966 dcmd->cmd = MFI_CMD_DCMD;
967 dcmd->cmd_status = 0xFF;
968 dcmd->sge_count = 1;
969 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
970 dcmd->timeout = 0;
971 dcmd->pad_0 = 0;
972 dcmd->data_xfer_len = cpu_to_le32(size_map_info);
973 dcmd->mbox.b[0] = num_lds;
974 dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
975 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
976 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
977 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
978
979 instance->map_update_cmd = cmd;
980
981 instance->instancet->issue_dcmd(instance, cmd);
982
983 return ret;
984 }
985
986 /*
987  * megasas_display_intel_branding - Display branding string
988 * @instance: per adapter object
989 *
990 * Return nothing.
991 */
992 static void
993 megasas_display_intel_branding(struct megasas_instance *instance)
994 {
995 if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
996 return;
997
998 switch (instance->pdev->device) {
999 case PCI_DEVICE_ID_LSI_INVADER:
1000 switch (instance->pdev->subsystem_device) {
1001 case MEGARAID_INTEL_RS3DC080_SSDID:
1002 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1003 instance->host->host_no,
1004 MEGARAID_INTEL_RS3DC080_BRANDING);
1005 break;
1006 case MEGARAID_INTEL_RS3DC040_SSDID:
1007 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1008 instance->host->host_no,
1009 MEGARAID_INTEL_RS3DC040_BRANDING);
1010 break;
1011 case MEGARAID_INTEL_RS3SC008_SSDID:
1012 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1013 instance->host->host_no,
1014 MEGARAID_INTEL_RS3SC008_BRANDING);
1015 break;
1016 case MEGARAID_INTEL_RS3MC044_SSDID:
1017 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1018 instance->host->host_no,
1019 MEGARAID_INTEL_RS3MC044_BRANDING);
1020 break;
1021 default:
1022 break;
1023 }
1024 break;
1025 case PCI_DEVICE_ID_LSI_FURY:
1026 switch (instance->pdev->subsystem_device) {
1027 case MEGARAID_INTEL_RS3WC080_SSDID:
1028 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1029 instance->host->host_no,
1030 MEGARAID_INTEL_RS3WC080_BRANDING);
1031 break;
1032 case MEGARAID_INTEL_RS3WC040_SSDID:
1033 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1034 instance->host->host_no,
1035 MEGARAID_INTEL_RS3WC040_BRANDING);
1036 break;
1037 default:
1038 break;
1039 }
1040 break;
1041 case PCI_DEVICE_ID_LSI_CUTLASS_52:
1042 case PCI_DEVICE_ID_LSI_CUTLASS_53:
1043 switch (instance->pdev->subsystem_device) {
1044 case MEGARAID_INTEL_RMS3BC160_SSDID:
1045 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1046 instance->host->host_no,
1047 MEGARAID_INTEL_RMS3BC160_BRANDING);
1048 break;
1049 default:
1050 break;
1051 }
1052 break;
1053 default:
1054 break;
1055 }
1056 }
1057
1058 /**
1059 * megasas_init_adapter_fusion - Initializes the FW
1060 * @instance: Adapter soft state
1061 *
1062 * This is the main function for initializing firmware.
1063 */
1064 u32
1065 megasas_init_adapter_fusion(struct megasas_instance *instance)
1066 {
1067 struct megasas_register_set __iomem *reg_set;
1068 struct fusion_context *fusion;
1069 u32 max_cmd, scratch_pad_2;
1070 int i = 0, count;
1071
1072 fusion = instance->ctrl_context;
1073
1074 reg_set = instance->reg_set;
1075
1076 /*
1077 * Get various operational parameters from status register
1078 */
1079 instance->max_fw_cmds =
1080 instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
1081 instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
1082
1083 /*
1084 * Reduce the max supported cmds by 1. This is to ensure that the
1085 * reply_q_sz (1 more than the max cmd that driver may send)
1086 * does not exceed max cmds that the FW can support
1087 */
1088 instance->max_fw_cmds = instance->max_fw_cmds-1;
1089
1090 /*
1091 	 * Only the driver's internal DCMDs and IOCTL DCMDs need to have MFI frames
1092 */
1093 instance->max_mfi_cmds =
1094 MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
1095
1096 max_cmd = instance->max_fw_cmds;
1097
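	/* Reply queue depth: (max_cmd + 1) rounded up to a multiple of 16, then doubled. */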
1098 fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
1099
1100 fusion->request_alloc_sz =
1101 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
1102 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
1103 *(fusion->reply_q_depth);
1104 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1105 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
1106 (max_cmd + 1)); /* Extra 1 for SMID 0 */
1107
1108 scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
1109 /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
1110 	 * the firmware supports an extended IO chain frame which is 4 times
1111 	 * larger than the legacy one.
1112 * Legacy Firmware - Frame size is (8 * 128) = 1K
1113 * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
1114 */
1115 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
1116 instance->max_chain_frame_sz =
1117 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1118 MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
1119 else
1120 instance->max_chain_frame_sz =
1121 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1122 MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
1123
1124 if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
1125 dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
1126 instance->max_chain_frame_sz,
1127 MEGASAS_CHAIN_FRAME_SZ_MIN);
1128 instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
1129 }
1130
1131 fusion->max_sge_in_main_msg =
1132 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1133 - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
1134
1135 fusion->max_sge_in_chain =
1136 instance->max_chain_frame_sz
1137 / sizeof(union MPI2_SGE_IO_UNION);
1138
1139 instance->max_num_sge =
1140 rounddown_pow_of_two(fusion->max_sge_in_main_msg
1141 + fusion->max_sge_in_chain - 2);
1142
1143 /* Used for pass thru MFI frame (DCMD) */
1144 fusion->chain_offset_mfi_pthru =
1145 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
1146
1147 fusion->chain_offset_io_request =
1148 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1149 sizeof(union MPI2_SGE_IO_UNION))/16;
1150
1151 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
1152 for (i = 0 ; i < count; i++)
1153 fusion->last_reply_idx[i] = 0;
1154
1155 /*
1156 * For fusion adapters, 3 commands for IOCTL and 5 commands
1157 * for driver's internal DCMDs.
1158 */
1159 instance->max_scsi_cmds = instance->max_fw_cmds -
1160 (MEGASAS_FUSION_INTERNAL_CMDS +
1161 MEGASAS_FUSION_IOCTL_CMDS);
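	/* Limit concurrent user IOCTLs to the command frames reserved for them above. */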
1162 sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
1163
1164 /*
1165 * Allocate memory for descriptors
1166 * Create a pool of commands
1167 */
1168 if (megasas_alloc_cmds(instance))
1169 goto fail_alloc_mfi_cmds;
1170 if (megasas_alloc_cmds_fusion(instance))
1171 goto fail_alloc_cmds;
1172
1173 if (megasas_ioc_init_fusion(instance))
1174 goto fail_ioc_init;
1175
1176 megasas_display_intel_branding(instance);
1177 if (megasas_get_ctrl_info(instance)) {
1178 dev_err(&instance->pdev->dev,
1179 "Could not get controller info. Fail from %s %d\n",
1180 __func__, __LINE__);
1181 goto fail_ioc_init;
1182 }
1183
1184 instance->flag_ieee = 1;
1185 fusion->fast_path_io = 0;
1186
1187 fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1188 for (i = 0; i < 2; i++) {
1189 fusion->ld_map[i] = NULL;
1190 fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
1191 fusion->drv_map_pages);
1192 if (!fusion->ld_drv_map[i]) {
1193 dev_err(&instance->pdev->dev, "Could not allocate "
1194 "memory for local map info for %d pages\n",
1195 fusion->drv_map_pages);
1196 if (i == 1)
1197 free_pages((ulong)fusion->ld_drv_map[0],
1198 fusion->drv_map_pages);
1199 goto fail_ioc_init;
1200 }
1201 memset(fusion->ld_drv_map[i], 0,
1202 ((1 << PAGE_SHIFT) << fusion->drv_map_pages));
1203 }
1204
1205 for (i = 0; i < 2; i++) {
1206 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1207 fusion->max_map_sz,
1208 &fusion->ld_map_phys[i],
1209 GFP_KERNEL);
1210 if (!fusion->ld_map[i]) {
1211 dev_err(&instance->pdev->dev, "Could not allocate memory "
1212 "for map info\n");
1213 goto fail_map_info;
1214 }
1215 }
1216
1217 if (!megasas_get_map_info(instance))
1218 megasas_sync_map_info(instance);
1219
1220 return 0;
1221
1222 fail_map_info:
1223 if (i == 1)
1224 dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
1225 fusion->ld_map[0], fusion->ld_map_phys[0]);
1226 fail_ioc_init:
1227 megasas_free_cmds_fusion(instance);
1228 fail_alloc_cmds:
1229 megasas_free_cmds(instance);
1230 fail_alloc_mfi_cmds:
1231 return 1;
1232 }
1233
1234 /**
1235 * map_cmd_status - Maps FW cmd status to OS cmd status
1236 * @cmd : Pointer to cmd
1237 * @status : status of cmd returned by FW
1238 * @ext_status : ext status of cmd returned by FW
1239 */
1240
1241 void
1242 map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
1243 {
1244
1245 switch (status) {
1246
1247 case MFI_STAT_OK:
1248 cmd->scmd->result = DID_OK << 16;
1249 break;
1250
1251 case MFI_STAT_SCSI_IO_FAILED:
1252 case MFI_STAT_LD_INIT_IN_PROGRESS:
1253 cmd->scmd->result = (DID_ERROR << 16) | ext_status;
1254 break;
1255
1256 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1257
1258 cmd->scmd->result = (DID_OK << 16) | ext_status;
1259 if (ext_status == SAM_STAT_CHECK_CONDITION) {
1260 memset(cmd->scmd->sense_buffer, 0,
1261 SCSI_SENSE_BUFFERSIZE);
1262 memcpy(cmd->scmd->sense_buffer, cmd->sense,
1263 SCSI_SENSE_BUFFERSIZE);
1264 cmd->scmd->result |= DRIVER_SENSE << 24;
1265 }
1266 break;
1267
1268 case MFI_STAT_LD_OFFLINE:
1269 case MFI_STAT_DEVICE_NOT_FOUND:
1270 cmd->scmd->result = DID_BAD_TARGET << 16;
1271 break;
1272 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1273 cmd->scmd->result = DID_IMM_RETRY << 16;
1274 break;
1275 default:
1276 dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
1277 cmd->scmd->result = DID_ERROR << 16;
1278 break;
1279 }
1280 }
1281
1282 /**
1283  * megasas_make_sgl_fusion - Prepares IEEE 64-bit SGL
1284 * @instance: Adapter soft state
1285 * @scp: SCSI command from the mid-layer
1286 * @sgl_ptr: SGL to be filled in
1287 * @cmd: cmd we are working on
1288 *
1289 * If successful, this function returns the number of SG elements.
1290 */
1291 static int
1292 megasas_make_sgl_fusion(struct megasas_instance *instance,
1293 struct scsi_cmnd *scp,
1294 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1295 struct megasas_cmd_fusion *cmd)
1296 {
1297 int i, sg_processed, sge_count;
1298 struct scatterlist *os_sgl;
1299 struct fusion_context *fusion;
1300
1301 fusion = instance->ctrl_context;
1302
1303 if (fusion->adapter_type == INVADER_SERIES) {
1304 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
1305 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
1306 sgl_ptr_end->Flags = 0;
1307 }
1308
1309 sge_count = scsi_dma_map(scp);
1310
1311 BUG_ON(sge_count < 0);
1312
1313 if (sge_count > instance->max_num_sge || !sge_count)
1314 return sge_count;
1315
1316 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1317 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
1318 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
1319 sgl_ptr->Flags = 0;
1320 if (fusion->adapter_type == INVADER_SERIES)
1321 if (i == sge_count - 1)
1322 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1323 sgl_ptr++;
1324
1325 sg_processed = i + 1;
1326
1327 if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
1328 (sge_count > fusion->max_sge_in_main_msg)) {
1329
1330 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
1331 if (fusion->adapter_type == INVADER_SERIES) {
1332 if ((le16_to_cpu(cmd->io_request->IoFlags) &
1333 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1334 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1335 cmd->io_request->ChainOffset =
1336 fusion->
1337 chain_offset_io_request;
1338 else
1339 cmd->io_request->ChainOffset = 0;
1340 } else
1341 cmd->io_request->ChainOffset =
1342 fusion->chain_offset_io_request;
1343
1344 sg_chain = sgl_ptr;
1345 /* Prepare chain element */
1346 sg_chain->NextChainOffset = 0;
1347 if (fusion->adapter_type == INVADER_SERIES)
1348 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1349 else
1350 sg_chain->Flags =
1351 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1352 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1353 sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
1354 sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
1355
1356 sgl_ptr =
1357 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
1358 memset(sgl_ptr, 0, instance->max_chain_frame_sz);
1359 }
1360 }
1361
1362 return sge_count;
1363 }
1364
1365 /**
1366 * megasas_set_pd_lba - Sets PD LBA
1367  * @io_request: IO request frame whose CDB is to be set up
1368  * @cdb_len: CDB length
1369  * @io_info: IO information (start block and number of blocks)
1370 *
1371 * Used to set the PD LBA in CDB for FP IOs
1372 */
1373 void
1374 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1375 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
1376 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
1377 {
1378 struct MR_LD_RAID *raid;
1379 u32 ld;
1380 u64 start_blk = io_info->pdBlock;
1381 u8 *cdb = io_request->CDB.CDB32;
1382 u32 num_blocks = io_info->numBlocks;
1383 u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
1384
1385 /* Check if T10 PI (DIF) is enabled for this LD */
1386 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
1387 raid = MR_LdRaidGet(ld, local_map_ptr);
1388 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
1389 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1390 cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
1391 cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
1392
1393 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1394 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
1395 else
1396 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
1397 cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
1398
1399 /* LBA */
1400 cdb[12] = (u8)((start_blk >> 56) & 0xff);
1401 cdb[13] = (u8)((start_blk >> 48) & 0xff);
1402 cdb[14] = (u8)((start_blk >> 40) & 0xff);
1403 cdb[15] = (u8)((start_blk >> 32) & 0xff);
1404 cdb[16] = (u8)((start_blk >> 24) & 0xff);
1405 cdb[17] = (u8)((start_blk >> 16) & 0xff);
1406 cdb[18] = (u8)((start_blk >> 8) & 0xff);
1407 cdb[19] = (u8)(start_blk & 0xff);
1408
1409 /* Logical block reference tag */
1410 io_request->CDB.EEDP32.PrimaryReferenceTag =
1411 cpu_to_be32(ref_tag);
1412 io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
1413 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
1414
1415 /* Transfer length */
1416 cdb[28] = (u8)((num_blocks >> 24) & 0xff);
1417 cdb[29] = (u8)((num_blocks >> 16) & 0xff);
1418 cdb[30] = (u8)((num_blocks >> 8) & 0xff);
1419 cdb[31] = (u8)(num_blocks & 0xff);
1420
1421 /* set SCSI IO EEDPFlags */
1422 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
1423 io_request->EEDPFlags = cpu_to_le16(
1424 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1425 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1426 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
1427 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
1428 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1429 } else {
1430 io_request->EEDPFlags = cpu_to_le16(
1431 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1432 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
1433 }
1434 io_request->Control |= cpu_to_le32((0x4 << 26));
1435 io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
1436 } else {
1437 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
1438 if (((cdb_len == 12) || (cdb_len == 16)) &&
1439 (start_blk <= 0xffffffff)) {
1440 if (cdb_len == 16) {
1441 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
1442 flagvals = cdb[1];
1443 groupnum = cdb[14];
1444 control = cdb[15];
1445 } else {
1446 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
1447 flagvals = cdb[1];
1448 groupnum = cdb[10];
1449 control = cdb[11];
1450 }
1451
1452 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1453
1454 cdb[0] = opcode;
1455 cdb[1] = flagvals;
1456 cdb[6] = groupnum;
1457 cdb[9] = control;
1458
1459 /* Transfer length */
1460 cdb[8] = (u8)(num_blocks & 0xff);
1461 cdb[7] = (u8)((num_blocks >> 8) & 0xff);
1462
1463 io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
1464 cdb_len = 10;
1465 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
1466 /* Convert to 16 byte CDB for large LBA's */
1467 switch (cdb_len) {
1468 case 6:
1469 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
1470 control = cdb[5];
1471 break;
1472 case 10:
1473 opcode =
1474 cdb[0] == READ_10 ? READ_16 : WRITE_16;
1475 flagvals = cdb[1];
1476 groupnum = cdb[6];
1477 control = cdb[9];
1478 break;
1479 case 12:
1480 opcode =
1481 cdb[0] == READ_12 ? READ_16 : WRITE_16;
1482 flagvals = cdb[1];
1483 groupnum = cdb[10];
1484 control = cdb[11];
1485 break;
1486 }
1487
1488 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1489
1490 cdb[0] = opcode;
1491 cdb[1] = flagvals;
1492 cdb[14] = groupnum;
1493 cdb[15] = control;
1494
1495 /* Transfer length */
1496 cdb[13] = (u8)(num_blocks & 0xff);
1497 cdb[12] = (u8)((num_blocks >> 8) & 0xff);
1498 cdb[11] = (u8)((num_blocks >> 16) & 0xff);
1499 cdb[10] = (u8)((num_blocks >> 24) & 0xff);
1500
1501 io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
1502 cdb_len = 16;
1503 }
1504
1505 /* Normal case, just load LBA here */
1506 switch (cdb_len) {
1507 case 6:
1508 {
1509 u8 val = cdb[1] & 0xE0;
1510 cdb[3] = (u8)(start_blk & 0xff);
1511 cdb[2] = (u8)((start_blk >> 8) & 0xff);
1512 cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
1513 break;
1514 }
1515 case 10:
1516 cdb[5] = (u8)(start_blk & 0xff);
1517 cdb[4] = (u8)((start_blk >> 8) & 0xff);
1518 cdb[3] = (u8)((start_blk >> 16) & 0xff);
1519 cdb[2] = (u8)((start_blk >> 24) & 0xff);
1520 break;
1521 case 12:
1522 cdb[5] = (u8)(start_blk & 0xff);
1523 cdb[4] = (u8)((start_blk >> 8) & 0xff);
1524 cdb[3] = (u8)((start_blk >> 16) & 0xff);
1525 cdb[2] = (u8)((start_blk >> 24) & 0xff);
1526 break;
1527 case 16:
1528 cdb[9] = (u8)(start_blk & 0xff);
1529 cdb[8] = (u8)((start_blk >> 8) & 0xff);
1530 cdb[7] = (u8)((start_blk >> 16) & 0xff);
1531 cdb[6] = (u8)((start_blk >> 24) & 0xff);
1532 cdb[5] = (u8)((start_blk >> 32) & 0xff);
1533 cdb[4] = (u8)((start_blk >> 40) & 0xff);
1534 cdb[3] = (u8)((start_blk >> 48) & 0xff);
1535 cdb[2] = (u8)((start_blk >> 56) & 0xff);
1536 break;
1537 }
1538 }
1539 }
1540
1541 /**
1542 * megasas_build_ldio_fusion - Prepares IOs to devices
1543 * @instance: Adapter soft state
1544 * @scp: SCSI command
1545 * @cmd: Command to be prepared
1546 *
1547 * Prepares the io_request and chain elements (sg_frame) for IO
1548 * The IO can be for PD (Fast Path) or LD
1549 */
1550 void
1551 megasas_build_ldio_fusion(struct megasas_instance *instance,
1552 struct scsi_cmnd *scp,
1553 struct megasas_cmd_fusion *cmd)
1554 {
1555 u8 fp_possible;
1556 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
1557 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1558 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1559 struct IO_REQUEST_INFO io_info;
1560 struct fusion_context *fusion;
1561 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1562 u8 *raidLUN;
1563
1564 device_id = MEGASAS_DEV_INDEX(scp);
1565
1566 fusion = instance->ctrl_context;
1567
1568 io_request = cmd->io_request;
1569 io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
1570 io_request->RaidContext.status = 0;
1571 io_request->RaidContext.exStatus = 0;
1572
1573 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
1574
1575 start_lba_lo = 0;
1576 start_lba_hi = 0;
1577 fp_possible = 0;
1578
1579 /*
1580 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1581 */
1582 if (scp->cmd_len == 6) {
1583 datalength = (u32) scp->cmnd[4];
1584 start_lba_lo = ((u32) scp->cmnd[1] << 16) |
1585 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
1586
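		/* A 6-byte CDB carries only a 21-bit LBA. */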
1587 start_lba_lo &= 0x1FFFFF;
1588 }
1589
1590 /*
1591 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1592 */
1593 else if (scp->cmd_len == 10) {
1594 datalength = (u32) scp->cmnd[8] |
1595 ((u32) scp->cmnd[7] << 8);
1596 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1597 ((u32) scp->cmnd[3] << 16) |
1598 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1599 }
1600
1601 /*
1602 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1603 */
1604 else if (scp->cmd_len == 12) {
1605 datalength = ((u32) scp->cmnd[6] << 24) |
1606 ((u32) scp->cmnd[7] << 16) |
1607 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1608 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1609 ((u32) scp->cmnd[3] << 16) |
1610 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1611 }
1612
1613 /*
1614 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1615 */
1616 else if (scp->cmd_len == 16) {
1617 datalength = ((u32) scp->cmnd[10] << 24) |
1618 ((u32) scp->cmnd[11] << 16) |
1619 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
1620 start_lba_lo = ((u32) scp->cmnd[6] << 24) |
1621 ((u32) scp->cmnd[7] << 16) |
1622 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1623
1624 start_lba_hi = ((u32) scp->cmnd[2] << 24) |
1625 ((u32) scp->cmnd[3] << 16) |
1626 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1627 }
1628
1629 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
1630 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
1631 io_info.numBlocks = datalength;
1632 io_info.ldTgtId = device_id;
1633 io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
1634
1635 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1636 io_info.isRead = 1;
1637
1638 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1639
1640 if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
1641 instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
1642 io_request->RaidContext.regLockFlags = 0;
1643 fp_possible = 0;
1644 } else {
1645 if (MR_BuildRaidContext(instance, &io_info,
1646 &io_request->RaidContext,
1647 local_map_ptr, &raidLUN))
1648 fp_possible = io_info.fpOkForIo;
1649 }
1650
1651 /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
1652 id by default, not CPU group id, otherwise all MSI-X queues won't
1653 be utilized */
1654 cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
1655 raw_smp_processor_id() % instance->msix_vectors : 0;
1656
1657 if (fp_possible) {
1658 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
1659 local_map_ptr, start_lba_lo);
1660 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1661 cmd->request_desc->SCSIIO.RequestFlags =
1662 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
1663 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1664 if (fusion->adapter_type == INVADER_SERIES) {
1665 if (io_request->RaidContext.regLockFlags ==
1666 REGION_TYPE_UNUSED)
1667 cmd->request_desc->SCSIIO.RequestFlags =
1668 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1669 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1670 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1671 io_request->RaidContext.nseg = 0x1;
1672 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1673 io_request->RaidContext.regLockFlags |=
1674 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1675 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1676 }
1677 if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
1678 (io_info.isRead)) {
1679 io_info.devHandle =
1680 get_updated_dev_handle(instance,
1681 &fusion->load_balance_info[device_id],
1682 &io_info);
1683 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
1684 cmd->pd_r1_lb = io_info.pd_after_lb;
1685 } else
1686 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
1687
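		/*
		 * If more than one device handle is valid for the chosen arm,
		 * alternate between the two handles on successive I/Os,
		 * presumably to spread fast-path I/O across both paths.
		 */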
1688 if ((raidLUN[0] == 1) &&
1689 (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
1690 instance->dev_handle = !(instance->dev_handle);
1691 io_info.devHandle =
1692 local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
1693 }
1694
1695 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1696 io_request->DevHandle = io_info.devHandle;
1697 /* populate the LUN field */
1698 memcpy(io_request->LUN, raidLUN, 8);
1699 } else {
1700 io_request->RaidContext.timeoutValue =
1701 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1702 cmd->request_desc->SCSIIO.RequestFlags =
1703 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
1704 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1705 if (fusion->adapter_type == INVADER_SERIES) {
1706 if (io_request->RaidContext.regLockFlags ==
1707 REGION_TYPE_UNUSED)
1708 cmd->request_desc->SCSIIO.RequestFlags =
1709 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1710 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1711 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1712 io_request->RaidContext.regLockFlags |=
1713 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1714 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1715 io_request->RaidContext.nseg = 0x1;
1716 }
1717 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1718 io_request->DevHandle = cpu_to_le16(device_id);
1719 } /* Not FP */
1720 }
1721
1722 /**
1723 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
1724 * @instance: Adapter soft state
1725 * @scp: SCSI command
1726 * @cmd: Command to be prepared
1727 *
1728 * Prepares the io_request frame for non-rw io cmds for vd.
1729 */
1730 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1731 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
1732 {
1733 u32 device_id;
1734 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1735 u16 pd_index = 0;
1736 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1737 struct fusion_context *fusion = instance->ctrl_context;
1738 u8 span, physArm;
1739 __le16 devHandle;
1740 u32 ld, arRef, pd;
1741 struct MR_LD_RAID *raid;
1742 struct RAID_CONTEXT *pRAID_Context;
1743 u8 fp_possible = 1;
1744
1745 io_request = cmd->io_request;
1746 device_id = MEGASAS_DEV_INDEX(scmd);
1747 pd_index = MEGASAS_PD_INDEX(scmd);
1748 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1749 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1750 /* get RAID_Context pointer */
1751 pRAID_Context = &io_request->RaidContext;
1752 /* Check with FW team */
1753 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1754 pRAID_Context->regLockRowLBA = 0;
1755 pRAID_Context->regLockLength = 0;
1756
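/* Non-RW commands can use the fast path only when the driver is in
 * fast-path mode and the target maps to a fast-path capable
 * (fpNonRWCapable) single-drive LD in the current RAID map.
 */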
1757 if (fusion->fast_path_io && (
1758 device_id < instance->fw_supported_vd_count)) {
1759
1760 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1761 if (ld >= instance->fw_supported_vd_count - 1)
1762 fp_possible = 0;
1763
1764 raid = MR_LdRaidGet(ld, local_map_ptr);
1765 if (!(raid->capability.fpNonRWCapable))
1766 fp_possible = 0;
1767 } else
1768 fp_possible = 0;
1769
1770 if (!fp_possible) {
1771 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1772 io_request->DevHandle = cpu_to_le16(device_id);
1773 io_request->LUN[1] = scmd->device->lun;
1774 pRAID_Context->timeoutValue =
1775 cpu_to_le16 (scmd->request->timeout / HZ);
1776 cmd->request_desc->SCSIIO.RequestFlags =
1777 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1778 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1779 } else {
1780
1781 /* set RAID context values */
1782 pRAID_Context->configSeqNum = raid->seqNum;
1783 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
1784 pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
1785
1786 /* get the DevHandle for the PD (since this is
1787 fpNonRWCapable, this is a single disk RAID0) */
1788 span = physArm = 0;
1789 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
1790 pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
1791 devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
1792
1793 /* build request descriptor */
1794 cmd->request_desc->SCSIIO.RequestFlags =
1795 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1796 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1797 cmd->request_desc->SCSIIO.DevHandle = devHandle;
1798
1799 /* populate the LUN field */
1800 memcpy(io_request->LUN, raid->LUN, 8);
1801
1802 /* build the raidScsiIO structure */
1803 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1804 io_request->DevHandle = devHandle;
1805 }
1806 }
1807
1808 /**
1809  * megasas_build_syspd_fusion - prepares RW/non-RW IOs for a system PD
1810  * @instance:		Adapter soft state
1811  * @scmd:		SCSI command
1812  * @cmd:		Command to be prepared
1813  * @fp_possible:	parameter to detect fast path or firmware path IO.
1814  *
1815  * Prepares the io_request frame for RW/non-RW IO cmds for system PDs.
1816 */
1817 static void
1818 megasas_build_syspd_fusion(struct megasas_instance *instance,
1819 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
1820 {
1821 u32 device_id;
1822 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1823 u16 pd_index = 0;
1824 u16 os_timeout_value;
1825 u16 timeout_limit;
1826 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1827 struct RAID_CONTEXT *pRAID_Context;
1828 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1829 struct fusion_context *fusion = instance->ctrl_context;
1830 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
1831
1832 device_id = MEGASAS_DEV_INDEX(scmd);
1833 pd_index = MEGASAS_PD_INDEX(scmd);
1834 os_timeout_value = scmd->request->timeout / HZ;
1835
1836 io_request = cmd->io_request;
1837 /* get RAID_Context pointer */
1838 pRAID_Context = &io_request->RaidContext;
1839 pRAID_Context->regLockFlags = 0;
1840 pRAID_Context->regLockRowLBA = 0;
1841 pRAID_Context->regLockLength = 0;
1842 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1843 io_request->LUN[1] = scmd->device->lun;
1844 pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1845 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1846
1847 /* If FW supports PD sequence number */
1848 if (instance->use_seqnum_jbod_fp &&
1849 instance->pd_list[pd_index].driveType == TYPE_DISK) {
1850 		/* TgtId must be incremented by 255 as the JBOD seq number
1851 		 * map is indexed below the RAID map
1852 		 */
1853 pRAID_Context->VirtualDiskTgtId =
1854 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
1855 pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum;
1856 io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
1857 pRAID_Context->regLockFlags |=
1858 (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
1859 pRAID_Context->Type = MPI2_TYPE_CUDA;
1860 pRAID_Context->nseg = 0x1;
1861 } else if (fusion->fast_path_io) {
1862 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1863 pRAID_Context->configSeqNum = 0;
1864 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1865 io_request->DevHandle =
1866 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1867 } else {
1868 /* Want to send all IO via FW path */
1869 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1870 pRAID_Context->configSeqNum = 0;
1871 io_request->DevHandle = cpu_to_le16(0xFFFF);
1872 }
1873
1874 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
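	/* Spread completions across the available reply queues by steering
	 * the reply to an MSI-x vector derived from the submitting CPU.
	 */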
1875 cmd->request_desc->SCSIIO.MSIxIndex =
1876 instance->msix_vectors ?
1877 (raw_smp_processor_id() % instance->msix_vectors) : 0;
1878
1879
1880 if (!fp_possible) {
1881 /* system pd firmware path */
1882 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1883 cmd->request_desc->SCSIIO.RequestFlags =
1884 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1885 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1886 pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
1887 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1888 } else {
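		/* A non-zero OS timeout is padded by one second before being
		 * clamped below (assumption: so the firmware timeout does not
		 * expire before the OS-level timeout it was derived from).
		 */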
1889 if (os_timeout_value)
1890 os_timeout_value++;
1891
1892 /* system pd Fast Path */
1893 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1894 timeout_limit = (scmd->device->type == TYPE_DISK) ?
1895 255 : 0xFFFF;
1896 pRAID_Context->timeoutValue =
1897 cpu_to_le16((os_timeout_value > timeout_limit) ?
1898 timeout_limit : os_timeout_value);
1899 if (fusion->adapter_type == INVADER_SERIES)
1900 io_request->IoFlags |=
1901 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1902
1903 cmd->request_desc->SCSIIO.RequestFlags =
1904 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1905 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1906 }
1907 }
1908
1909 /**
1910 * megasas_build_io_fusion - Prepares IOs to devices
1911 * @instance: Adapter soft state
1912 * @scp: SCSI command
1913 * @cmd: Command to be prepared
1914 *
1915 * Invokes helper functions to prepare request frames
1916 * and sets flags appropriate for IO/Non-IO cmd
1917 */
1918 int
1919 megasas_build_io_fusion(struct megasas_instance *instance,
1920 struct scsi_cmnd *scp,
1921 struct megasas_cmd_fusion *cmd)
1922 {
1923 u16 sge_count;
1924 u8 cmd_type;
1925 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
1926
1927 /* Zero out some fields so they don't get reused */
1928 memset(io_request->LUN, 0x0, 8);
1929 io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
1930 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
1931 io_request->EEDPFlags = 0;
1932 io_request->Control = 0;
1933 io_request->EEDPBlockSize = 0;
1934 io_request->ChainOffset = 0;
1935 io_request->RaidContext.RAIDFlags = 0;
1936 io_request->RaidContext.Type = 0;
1937 io_request->RaidContext.nseg = 0;
1938
1939 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
1940 /*
1941 	 * Just the CDB length; the rest of the flags are zero.
1942 * This will be modified for FP in build_ldio_fusion
1943 */
1944 io_request->IoFlags = cpu_to_le16(scp->cmd_len);
1945
1946 switch (cmd_type = megasas_cmd_type(scp)) {
1947 case READ_WRITE_LDIO:
1948 megasas_build_ldio_fusion(instance, scp, cmd);
1949 break;
1950 case NON_READ_WRITE_LDIO:
1951 megasas_build_ld_nonrw_fusion(instance, scp, cmd);
1952 break;
1953 case READ_WRITE_SYSPDIO:
1954 case NON_READ_WRITE_SYSPDIO:
1955 if (instance->secure_jbod_support &&
1956 (cmd_type == NON_READ_WRITE_SYSPDIO))
1957 megasas_build_syspd_fusion(instance, scp, cmd, 0);
1958 else
1959 megasas_build_syspd_fusion(instance, scp, cmd, 1);
1960 break;
1961 default:
1962 break;
1963 }
1964
1965 /*
1966 * Construct SGL
1967 */
1968
1969 sge_count =
1970 megasas_make_sgl_fusion(instance, scp,
1971 (struct MPI25_IEEE_SGE_CHAIN64 *)
1972 &io_request->SGL, cmd);
1973
1974 if (sge_count > instance->max_num_sge) {
1975 dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
1976 "max (0x%x) allowed\n", sge_count,
1977 instance->max_num_sge);
1978 return 1;
1979 }
1980
1981 	/* numSGE stores the lower 8 bits of sge_count;
1982 	 * numSGEExt stores the upper 8 bits of sge_count.
1983 	 */
1984 io_request->RaidContext.numSGE = sge_count;
1985 io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8);
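	/* Example: sge_count = 0x123 -> numSGE = 0x23, numSGEExt = 0x01. */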
1986
1987 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1988
1989 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1990 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
1991 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1992 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
1993
1994 io_request->SGLOffset0 =
1995 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
1996
1997 io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
1998 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
1999
2000 cmd->scmd = scp;
2001 scp->SCp.ptr = (char *)cmd;
2002
2003 return 0;
2004 }
2005
2006 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2007 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
2008 {
2009 u8 *p;
2010 struct fusion_context *fusion;
2011
2012 if (index >= instance->max_fw_cmds) {
2013 		dev_err(&instance->pdev->dev, "Invalid SMID (0x%x) request for "
2014 "descriptor for scsi%d\n", index,
2015 instance->host->host_no);
2016 return NULL;
2017 }
2018 fusion = instance->ctrl_context;
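	/* Request descriptors are a flat array of fixed-size slots; slot i
	 * belongs to SMID i + 1 (SMIDs are 1-based, the array is 0-based).
	 */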
2019 	p = fusion->req_frames_desc +
2020 		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
2021
2022 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
2023 }
2024
2025 /**
2026  * megasas_build_and_issue_cmd_fusion - Main routine for building and
2027  *					 issuing a non-IOCTL cmd
2028 * @instance: Adapter soft state
2029 * @scmd: pointer to scsi cmd from OS
2030 */
2031 static u32
2032 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
2033 struct scsi_cmnd *scmd)
2034 {
2035 struct megasas_cmd_fusion *cmd;
2036 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2037 u32 index;
2038 struct fusion_context *fusion;
2039
2040 fusion = instance->ctrl_context;
2041
2042 cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
2043
2044 index = cmd->index;
2045
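	/* cmd->index is the 1-based SMID; the descriptor array is 0-based,
	 * hence index - 1 below.
	 */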
2046 req_desc = megasas_get_request_descriptor(instance, index-1);
2047 if (!req_desc)
2048 return 1;
2049
2050 req_desc->Words = 0;
2051 cmd->request_desc = req_desc;
2052
2053 if (megasas_build_io_fusion(instance, scmd, cmd)) {
2054 megasas_return_cmd_fusion(instance, cmd);
2055 dev_err(&instance->pdev->dev, "Error building command\n");
2056 cmd->request_desc = NULL;
2057 return 1;
2058 }
2059
2060 req_desc = cmd->request_desc;
2061 req_desc->SCSIIO.SMID = cpu_to_le16(index);
2062
2063 if (cmd->io_request->ChainOffset != 0 &&
2064 cmd->io_request->ChainOffset != 0xF)
2065 dev_err(&instance->pdev->dev, "The chain offset value is not "
2066 "correct : %x\n", cmd->io_request->ChainOffset);
2067
2068 /*
2069 * Issue the command to the FW
2070 */
2071 atomic_inc(&instance->fw_outstanding);
2072
2073 megasas_fire_cmd_fusion(instance, req_desc);
2074
2075 return 0;
2076 }
2077
2078 /**
2079 * complete_cmd_fusion - Completes command
2080 * @instance: Adapter soft state
2081  * Completes all commands that are in the reply descriptor queue
2082 */
2083 int
2084 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2085 {
2086 union MPI2_REPLY_DESCRIPTORS_UNION *desc;
2087 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
2088 struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
2089 struct fusion_context *fusion;
2090 struct megasas_cmd *cmd_mfi;
2091 struct megasas_cmd_fusion *cmd_fusion;
2092 u16 smid, num_completed;
2093 u8 reply_descript_type;
2094 u32 status, extStatus, device_id;
2095 union desc_value d_val;
2096 struct LD_LOAD_BALANCE_INFO *lbinfo;
2097 int threshold_reply_count = 0;
2098 struct scsi_cmnd *scmd_local = NULL;
2099
2100 fusion = instance->ctrl_context;
2101
2102 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
2103 return IRQ_HANDLED;
2104
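	/* Each MSI-x vector owns a reply_alloc_sz-byte slice of the reply
	 * descriptor ring; resume scanning from where the last pass for this
	 * vector left off (last_reply_idx).
	 */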
2105 desc = fusion->reply_frames_desc;
2106 desc += ((MSIxIndex * fusion->reply_alloc_sz)/
2107 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
2108 fusion->last_reply_idx[MSIxIndex];
2109
2110 reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2111
2112 d_val.word = desc->Words;
2113
2114 reply_descript_type = reply_desc->ReplyFlags &
2115 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2116
2117 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2118 return IRQ_NONE;
2119
2120 num_completed = 0;
2121
2122 while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
2123 d_val.u.high != cpu_to_le32(UINT_MAX)) {
2124 smid = le16_to_cpu(reply_desc->SMID);
2125
2126 cmd_fusion = fusion->cmd_list[smid - 1];
2127
2128 scsi_io_req =
2129 (struct MPI2_RAID_SCSI_IO_REQUEST *)
2130 cmd_fusion->io_request;
2131
2132 if (cmd_fusion->scmd)
2133 cmd_fusion->scmd->SCp.ptr = NULL;
2134
2135 scmd_local = cmd_fusion->scmd;
2136 status = scsi_io_req->RaidContext.status;
2137 extStatus = scsi_io_req->RaidContext.exStatus;
2138
2139 switch (scsi_io_req->Function) {
2140 case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
2141 /* Update load balancing info */
2142 device_id = MEGASAS_DEV_INDEX(scmd_local);
2143 lbinfo = &fusion->load_balance_info[device_id];
2144 if (cmd_fusion->scmd->SCp.Status &
2145 MEGASAS_LOAD_BALANCE_FLAG) {
2146 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
2147 cmd_fusion->scmd->SCp.Status &=
2148 ~MEGASAS_LOAD_BALANCE_FLAG;
2149 }
2150 if (reply_descript_type ==
2151 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
2152 if (megasas_dbg_lvl == 5)
2153 dev_err(&instance->pdev->dev, "\nFAST Path "
2154 "IO Success\n");
2155 }
2156 			/* Fall through and complete IO */
2157 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
2158 /* Map the FW Cmd Status */
2159 map_cmd_status(cmd_fusion, status, extStatus);
2160 scsi_io_req->RaidContext.status = 0;
2161 scsi_io_req->RaidContext.exStatus = 0;
2162 megasas_return_cmd_fusion(instance, cmd_fusion);
2163 scsi_dma_unmap(scmd_local);
2164 scmd_local->scsi_done(scmd_local);
2165 atomic_dec(&instance->fw_outstanding);
2166
2167 break;
2168 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
2169 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2170
2171 /* Poll mode. Dummy free.
2172 * In case of Interrupt mode, caller has reverse check.
2173 */
2174 if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
2175 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
2176 megasas_return_cmd(instance, cmd_mfi);
2177 } else
2178 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2179 break;
2180 }
2181
2182 fusion->last_reply_idx[MSIxIndex]++;
2183 if (fusion->last_reply_idx[MSIxIndex] >=
2184 fusion->reply_q_depth)
2185 fusion->last_reply_idx[MSIxIndex] = 0;
2186
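		/* Re-arm the slot: an all-ones descriptor reads back as
		 * unused on the next pass.
		 */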
2187 desc->Words = cpu_to_le64(ULLONG_MAX);
2188 num_completed++;
2189 threshold_reply_count++;
2190
2191 /* Get the next reply descriptor */
2192 if (!fusion->last_reply_idx[MSIxIndex])
2193 desc = fusion->reply_frames_desc +
2194 ((MSIxIndex * fusion->reply_alloc_sz)/
2195 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
2196 else
2197 desc++;
2198
2199 reply_desc =
2200 (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2201
2202 d_val.word = desc->Words;
2203
2204 reply_descript_type = reply_desc->ReplyFlags &
2205 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2206
2207 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2208 break;
2209 /*
2210 		 * Write to the reply post host index register after completing a
2211 		 * threshold number of replies while more replies are still pending
2212 		 * in the reply queue.
2213 */
2214 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
2215 if (fusion->adapter_type == INVADER_SERIES)
2216 writel(((MSIxIndex & 0x7) << 24) |
2217 fusion->last_reply_idx[MSIxIndex],
2218 instance->reply_post_host_index_addr[MSIxIndex/8]);
2219 else
2220 writel((MSIxIndex << 24) |
2221 fusion->last_reply_idx[MSIxIndex],
2222 instance->reply_post_host_index_addr[0]);
2223 threshold_reply_count = 0;
2224 }
2225 }
2226
2227 if (!num_completed)
2228 return IRQ_NONE;
2229
2230 wmb();
2231 if (fusion->adapter_type == INVADER_SERIES)
2232 writel(((MSIxIndex & 0x7) << 24) |
2233 fusion->last_reply_idx[MSIxIndex],
2234 instance->reply_post_host_index_addr[MSIxIndex/8]);
2235 else
2236 writel((MSIxIndex << 24) |
2237 fusion->last_reply_idx[MSIxIndex],
2238 instance->reply_post_host_index_addr[0]);
2239 megasas_check_and_restore_queue_depth(instance);
2240 return IRQ_HANDLED;
2241 }
2242
2243 /**
2244 * megasas_complete_cmd_dpc_fusion - Completes command
2245 * @instance: Adapter soft state
2246 *
2247 * Tasklet to complete cmds
2248 */
2249 void
2250 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
2251 {
2252 struct megasas_instance *instance =
2253 (struct megasas_instance *)instance_addr;
2254 unsigned long flags;
2255 u32 count, MSIxIndex;
2256
2257 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
2258
2259 	/* If we have already declared adapter dead, do not complete cmds */
2260 spin_lock_irqsave(&instance->hba_lock, flags);
2261 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
2262 spin_unlock_irqrestore(&instance->hba_lock, flags);
2263 return;
2264 }
2265 spin_unlock_irqrestore(&instance->hba_lock, flags);
2266
2267 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
2268 complete_cmd_fusion(instance, MSIxIndex);
2269 }
2270
2271 /**
2272  * megasas_isr_fusion - ISR entry point
2273 */
2274 irqreturn_t megasas_isr_fusion(int irq, void *devp)
2275 {
2276 struct megasas_irq_context *irq_context = devp;
2277 struct megasas_instance *instance = irq_context->instance;
2278 u32 mfiStatus, fw_state, dma_state;
2279
2280 if (instance->mask_interrupts)
2281 return IRQ_NONE;
2282
2283 if (!instance->msix_vectors) {
2284 mfiStatus = instance->instancet->clear_intr(instance->reg_set);
2285 if (!mfiStatus)
2286 return IRQ_NONE;
2287 }
2288
2289 /* If we are resetting, bail */
2290 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
2291 instance->instancet->clear_intr(instance->reg_set);
2292 return IRQ_HANDLED;
2293 }
2294
2295 if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
2296 instance->instancet->clear_intr(instance->reg_set);
2297 /* If we didn't complete any commands, check for FW fault */
2298 fw_state = instance->instancet->read_fw_status_reg(
2299 instance->reg_set) & MFI_STATE_MASK;
2300 dma_state = instance->instancet->read_fw_status_reg
2301 (instance->reg_set) & MFI_STATE_DMADONE;
2302 if (instance->crash_dump_drv_support &&
2303 instance->crash_dump_app_support) {
2304 /* Start collecting crash, if DMA bit is done */
2305 if ((fw_state == MFI_STATE_FAULT) && dma_state)
2306 schedule_work(&instance->crash_init);
2307 else if (fw_state == MFI_STATE_FAULT)
2308 schedule_work(&instance->work_init);
2309 } else if (fw_state == MFI_STATE_FAULT) {
2310 			dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt "
2311 				"for scsi%d\n", instance->host->host_no);
2312 schedule_work(&instance->work_init);
2313 }
2314 }
2315
2316 return IRQ_HANDLED;
2317 }
2318
2319 /**
2320  * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru
2321  * @instance:			Adapter soft state
2322  * @mfi_cmd:			megasas_cmd pointer
2323 *
2324 */
2325 u8
2326 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2327 struct megasas_cmd *mfi_cmd)
2328 {
2329 struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2330 struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
2331 struct megasas_cmd_fusion *cmd;
2332 struct fusion_context *fusion;
2333 struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
2334
2335 fusion = instance->ctrl_context;
2336
2337 cmd = megasas_get_cmd_fusion(instance,
2338 instance->max_scsi_cmds + mfi_cmd->index);
2339
2340 /* Save the smid. To be used for returning the cmd */
2341 mfi_cmd->context.smid = cmd->index;
2342
2343 /*
2344 * For cmds where the flag is set, store the flag and check
2345 * on completion. For cmds with this flag, don't call
2346 * megasas_complete_cmd
2347 */
2348
2349 if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
2350 mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
2351
2352 io_req = cmd->io_request;
2353
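	/* Assumption: on Invader-series controllers the Flags byte of the
	 * last SGE slot in the main frame is cleared so no stale
	 * end-of-list/chain marker is seen ahead of the chain element built
	 * below.
	 */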
2354 if (fusion->adapter_type == INVADER_SERIES) {
2355 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
2356 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
2357 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
2358 sgl_ptr_end->Flags = 0;
2359 }
2360
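	/* The MPT frame carries no data of its own; a single IEEE chain
	 * element is pointed at the legacy MFI frame, which firmware then
	 * executes as a pass-through.
	 */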
2361 mpi25_ieee_chain =
2362 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2363
2364 io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2365 io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
2366 SGL) / 4;
2367 io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
2368
2369 mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
2370
2371 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2372 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2373
2374 mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
2375
2376 return 0;
2377 }
2378
2379 /**
2380  * build_mpt_cmd - Calls helper function to build an MPT frame for an MFI Pass thru cmd
2381 * @instance: Adapter soft state
2382 * @cmd: mfi cmd to build
2383 *
2384 */
2385 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2386 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
2387 {
2388 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2389 u16 index;
2390
2391 if (build_mpt_mfi_pass_thru(instance, cmd)) {
2392 dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
2393 return NULL;
2394 }
2395
2396 index = cmd->context.smid;
2397
2398 req_desc = megasas_get_request_descriptor(instance, index - 1);
2399
2400 if (!req_desc)
2401 return NULL;
2402
2403 req_desc->Words = 0;
2404 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2405 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2406
2407 req_desc->SCSIIO.SMID = cpu_to_le16(index);
2408
2409 return req_desc;
2410 }
2411
2412 /**
2413  * megasas_issue_dcmd_fusion - Issues an MFI Pass thru cmd
2414 * @instance: Adapter soft state
2415 * @cmd: mfi cmd pointer
2416 *
2417 */
2418 void
2419 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
2420 struct megasas_cmd *cmd)
2421 {
2422 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2423
2424 req_desc = build_mpt_cmd(instance, cmd);
2425 if (!req_desc) {
2426 dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
2427 return;
2428 }
2429 megasas_fire_cmd_fusion(instance, req_desc);
2430 }
2431
2432 /**
2433 * megasas_release_fusion - Reverses the FW initialization
2434 * @instance: Adapter soft state
2435 */
2436 void
2437 megasas_release_fusion(struct megasas_instance *instance)
2438 {
2439 megasas_free_cmds(instance);
2440 megasas_free_cmds_fusion(instance);
2441
2442 iounmap(instance->reg_set);
2443
2444 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
2445 }
2446
2447 /**
2448 * megasas_read_fw_status_reg_fusion - returns the current FW status value
2449 * @regs: MFI register set
2450 */
2451 static u32
2452 megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
2453 {
2454 return readl(&(regs)->outbound_scratch_pad);
2455 }
2456
2457 /**
2458 * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware
2459 * @instance: Controller's soft instance
2460  * The number of buffers actually allocated is recorded in instance->drv_buf_alloc.
2461 */
2462 static void
2463 megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
2464 {
2465 unsigned int i;
2466
2467 instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
2468 for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
2469 instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL,
2470 instance->crash_buf_pages);
2471 if (!instance->crash_buf[i]) {
2472 dev_info(&instance->pdev->dev, "Firmware crash dump "
2473 "memory allocation failed at index %d\n", i);
2474 break;
2475 }
2476 memset(instance->crash_buf[i], 0,
2477 ((1 << PAGE_SHIFT) << instance->crash_buf_pages));
2478 }
2479 instance->drv_buf_alloc = i;
2480 }
2481
2482 /**
2483 * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware
2484 * @instance: Controller's soft instance
2485 */
2486 void
2487 megasas_free_host_crash_buffer(struct megasas_instance *instance)
2488 {
2489 	unsigned int i;
2490 
2491 for (i = 0; i < instance->drv_buf_alloc; i++) {
2492 if (instance->crash_buf[i])
2493 free_pages((ulong)instance->crash_buf[i],
2494 instance->crash_buf_pages);
2495 }
2496 instance->drv_buf_index = 0;
2497 instance->drv_buf_alloc = 0;
2498 instance->fw_crash_state = UNAVAILABLE;
2499 instance->fw_crash_buffer_size = 0;
2500 }
2501
2502 /**
2503 * megasas_adp_reset_fusion - For controller reset
2504 * @regs: MFI register set
2505 */
2506 static int
2507 megasas_adp_reset_fusion(struct megasas_instance *instance,
2508 struct megasas_register_set __iomem *regs)
2509 {
2510 u32 host_diag, abs_state, retry;
2511
2512 /* Now try to reset the chip */
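	/* Write the MPI2 diagnostic-register unlock key sequence; the DRWE
	 * bit checked below only sticks once the full sequence is accepted.
	 */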
2513 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2514 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2515 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2516 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2517 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2518 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2519 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2520
2521 /* Check that the diag write enable (DRWE) bit is on */
2522 host_diag = readl(&instance->reg_set->fusion_host_diag);
2523 retry = 0;
2524 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2525 msleep(100);
2526 host_diag = readl(&instance->reg_set->fusion_host_diag);
2527 if (retry++ == 100) {
2528 dev_warn(&instance->pdev->dev,
2529 "Host diag unlock failed from %s %d\n",
2530 __func__, __LINE__);
2531 break;
2532 }
2533 }
2534 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2535 return -1;
2536
2537 /* Send chip reset command */
2538 writel(host_diag | HOST_DIAG_RESET_ADAPTER,
2539 &instance->reg_set->fusion_host_diag);
2540 msleep(3000);
2541
2542 /* Make sure reset adapter bit is cleared */
2543 host_diag = readl(&instance->reg_set->fusion_host_diag);
2544 retry = 0;
2545 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2546 msleep(100);
2547 host_diag = readl(&instance->reg_set->fusion_host_diag);
2548 if (retry++ == 1000) {
2549 dev_warn(&instance->pdev->dev,
2550 "Diag reset adapter never cleared %s %d\n",
2551 __func__, __LINE__);
2552 break;
2553 }
2554 }
2555 if (host_diag & HOST_DIAG_RESET_ADAPTER)
2556 return -1;
2557
2558 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
2559 & MFI_STATE_MASK;
2560 retry = 0;
2561
2562 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2563 msleep(100);
2564 abs_state = instance->instancet->
2565 read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2566 }
2567 if (abs_state <= MFI_STATE_FW_INIT) {
2568 dev_warn(&instance->pdev->dev,
2569 "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
2570 abs_state, __func__, __LINE__);
2571 return -1;
2572 }
2573
2574 return 0;
2575 }
2576
2577 /**
2578 * megasas_check_reset_fusion - For controller reset check
2579 * @regs: MFI register set
2580 */
2581 static int
2582 megasas_check_reset_fusion(struct megasas_instance *instance,
2583 struct megasas_register_set __iomem *regs)
2584 {
2585 return 0;
2586 }
2587
2588 /* This function waits for outstanding commands on fusion to complete */
2589 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2590 int iotimeout, int *convert)
2591 {
2592 int i, outstanding, retval = 0, hb_seconds_missed = 0;
2593 u32 fw_state;
2594
2595 for (i = 0; i < resetwaittime; i++) {
2596 /* Check if firmware is in fault state */
2597 fw_state = instance->instancet->read_fw_status_reg(
2598 instance->reg_set) & MFI_STATE_MASK;
2599 if (fw_state == MFI_STATE_FAULT) {
2600 dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
2601 " will reset adapter scsi%d.\n",
2602 instance->host->host_no);
2603 retval = 1;
2604 goto out;
2605 }
2606 /* If SR-IOV VF mode & heartbeat timeout, don't wait */
2607 if (instance->requestorId && !iotimeout) {
2608 retval = 1;
2609 goto out;
2610 }
2611
2612 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
2613 if (instance->requestorId && iotimeout) {
2614 if (instance->hb_host_mem->HB.fwCounter !=
2615 instance->hb_host_mem->HB.driverCounter) {
2616 instance->hb_host_mem->HB.driverCounter =
2617 instance->hb_host_mem->HB.fwCounter;
2618 hb_seconds_missed = 0;
2619 } else {
2620 hb_seconds_missed++;
2621 if (hb_seconds_missed ==
2622 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
2623 dev_warn(&instance->pdev->dev, "SR-IOV:"
2624 					" Heartbeat never completed"
2625 					" while polling during I/O"
2626 					" timeout handling for"
2627 					" scsi%d.\n",
2628 instance->host->host_no);
2629 *convert = 1;
2630 retval = 1;
2631 goto out;
2632 }
2633 }
2634 }
2635
2636 outstanding = atomic_read(&instance->fw_outstanding);
2637 if (!outstanding)
2638 goto out;
2639
2640 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2641 			dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
2642 "commands to complete for scsi%d\n", i,
2643 outstanding, instance->host->host_no);
2644 megasas_complete_cmd_dpc_fusion(
2645 (unsigned long)instance);
2646 }
2647 msleep(1000);
2648 }
2649
2650 if (atomic_read(&instance->fw_outstanding)) {
2651 dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
2652 "will reset adapter scsi%d.\n",
2653 instance->host->host_no);
2654 *convert = 1;
2655 retval = 1;
2656 }
2657 out:
2658 return retval;
2659 }
2660
2661 void megasas_reset_reply_desc(struct megasas_instance *instance)
2662 {
2663 int i, count;
2664 struct fusion_context *fusion;
2665 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2666
2667 fusion = instance->ctrl_context;
2668 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
2669 for (i = 0 ; i < count ; i++)
2670 fusion->last_reply_idx[i] = 0;
2671 reply_desc = fusion->reply_frames_desc;
2672 for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
2673 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
2674 }
2675
2676 /*
2677 * megasas_refire_mgmt_cmd : Re-fire management commands
2678 * @instance: Controller's soft instance
2679 */
2680 void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
2681 {
2682 int j;
2683 struct megasas_cmd_fusion *cmd_fusion;
2684 struct fusion_context *fusion;
2685 struct megasas_cmd *cmd_mfi;
2686 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2687 u16 smid;
2688
2689 fusion = instance->ctrl_context;
2690
2691 /* Re-fire management commands.
2692 	 * Do not traverse the complete MPT frame pool. Start from max_scsi_cmds.
2693 */
2694 for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
2695 cmd_fusion = fusion->cmd_list[j];
2696 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2697 smid = le16_to_cpu(cmd_mfi->context.smid);
2698
2699 if (!smid)
2700 continue;
2701 req_desc = megasas_get_request_descriptor
2702 (instance, smid - 1);
2703 if (req_desc && ((cmd_mfi->frame->dcmd.opcode !=
2704 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
2705 (cmd_mfi->frame->dcmd.opcode !=
2706 cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO))))
2707 megasas_fire_cmd_fusion(instance, req_desc);
2708 else
2709 megasas_return_cmd(instance, cmd_mfi);
2710 }
2711 }
2712
2713 /* Check for a second path that is currently UP */
2714 int megasas_check_mpio_paths(struct megasas_instance *instance,
2715 struct scsi_cmnd *scmd)
2716 {
2717 int i, j, retval = (DID_RESET << 16);
2718
2719 if (instance->mpio && instance->requestorId) {
2720 for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
2721 for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
2722 if (megasas_mgmt_info.instance[i] &&
2723 (megasas_mgmt_info.instance[i] != instance) &&
2724 megasas_mgmt_info.instance[i]->mpio &&
2725 megasas_mgmt_info.instance[i]->requestorId
2726 &&
2727 (megasas_mgmt_info.instance[i]->ld_ids[j]
2728 == scmd->device->id)) {
2729 retval = (DID_NO_CONNECT << 16);
2730 goto out;
2731 }
2732 }
2733 out:
2734 return retval;
2735 }
2736
2737 /* Core fusion reset function */
2738 int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2739 {
2740 int retval = SUCCESS, i, convert = 0;
2741 struct megasas_instance *instance;
2742 struct megasas_cmd_fusion *cmd_fusion;
2743 struct fusion_context *fusion;
2744 u32 abs_state, status_reg, reset_adapter;
2745 u32 io_timeout_in_crash_mode = 0;
2746 struct scsi_cmnd *scmd_local = NULL;
2747
2748 instance = (struct megasas_instance *)shost->hostdata;
2749 fusion = instance->ctrl_context;
2750
2751 mutex_lock(&instance->reset_mutex);
2752
2753 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
2754 dev_warn(&instance->pdev->dev, "Hardware critical error, "
2755 "returning FAILED for scsi%d.\n",
2756 instance->host->host_no);
2757 mutex_unlock(&instance->reset_mutex);
2758 return FAILED;
2759 }
2760 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
2761 abs_state = status_reg & MFI_STATE_MASK;
2762
2763 /* IO timeout detected, forcibly put FW in FAULT state */
2764 if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
2765 instance->crash_dump_app_support && iotimeout) {
2766 dev_info(&instance->pdev->dev, "IO timeout is detected, "
2767 "forcibly FAULT Firmware\n");
2768 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2769 status_reg = readl(&instance->reg_set->doorbell);
2770 writel(status_reg | MFI_STATE_FORCE_OCR,
2771 &instance->reg_set->doorbell);
2772 readl(&instance->reg_set->doorbell);
2773 mutex_unlock(&instance->reset_mutex);
2774 do {
2775 ssleep(3);
2776 io_timeout_in_crash_mode++;
2777 dev_dbg(&instance->pdev->dev, "waiting for [%d] "
2778 "seconds for crash dump collection and OCR "
2779 "to be done\n", (io_timeout_in_crash_mode * 3));
2780 } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
2781 (io_timeout_in_crash_mode < 80));
2782
2783 if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
2784 dev_info(&instance->pdev->dev, "OCR done for IO "
2785 "timeout case\n");
2786 retval = SUCCESS;
2787 } else {
2788 dev_info(&instance->pdev->dev, "Controller is not "
2789 "operational after 240 seconds wait for IO "
2790 				"timeout case in FW crash dump mode; do "
2791 "OCR/kill adapter\n");
2792 retval = megasas_reset_fusion(shost, 0);
2793 }
2794 return retval;
2795 }
2796
2797 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
2798 del_timer_sync(&instance->sriov_heartbeat_timer);
2799 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2800 instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
2801 instance->instancet->disable_intr(instance);
2802 msleep(1000);
2803
2804 /* First try waiting for commands to complete */
2805 if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
2806 &convert)) {
2807 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2808 dev_warn(&instance->pdev->dev, "resetting fusion "
2809 "adapter scsi%d.\n", instance->host->host_no);
2810 if (convert)
2811 iotimeout = 0;
2812
2813 /* Now return commands back to the OS */
2814 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
2815 cmd_fusion = fusion->cmd_list[i];
2816 scmd_local = cmd_fusion->scmd;
2817 if (cmd_fusion->scmd) {
2818 scmd_local->result =
2819 megasas_check_mpio_paths(instance,
2820 scmd_local);
2821 megasas_return_cmd_fusion(instance, cmd_fusion);
2822 scsi_dma_unmap(scmd_local);
2823 scmd_local->scsi_done(scmd_local);
2824 atomic_dec(&instance->fw_outstanding);
2825 }
2826 }
2827
2828 status_reg = instance->instancet->read_fw_status_reg(
2829 instance->reg_set);
2830 abs_state = status_reg & MFI_STATE_MASK;
2831 reset_adapter = status_reg & MFI_RESET_ADAPTER;
2832 if (instance->disableOnlineCtrlReset ||
2833 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2834 /* Reset not supported, kill adapter */
2835 dev_warn(&instance->pdev->dev, "Reset not supported"
2836 ", killing adapter scsi%d.\n",
2837 instance->host->host_no);
2838 megaraid_sas_kill_hba(instance);
2839 instance->skip_heartbeat_timer_del = 1;
2840 retval = FAILED;
2841 goto out;
2842 }
2843
2844 /* Let SR-IOV VF & PF sync up if there was a HB failure */
2845 if (instance->requestorId && !iotimeout) {
2846 msleep(MEGASAS_OCR_SETTLE_TIME_VF);
2847 /* Look for a late HB update after VF settle time */
2848 if (abs_state == MFI_STATE_OPERATIONAL &&
2849 (instance->hb_host_mem->HB.fwCounter !=
2850 instance->hb_host_mem->HB.driverCounter)) {
2851 instance->hb_host_mem->HB.driverCounter =
2852 instance->hb_host_mem->HB.fwCounter;
2853 dev_warn(&instance->pdev->dev, "SR-IOV:"
2854 "Late FW heartbeat update for "
2855 "scsi%d.\n",
2856 instance->host->host_no);
2857 } else {
2858 /* In VF mode, first poll for FW ready */
2859 for (i = 0;
2860 i < (MEGASAS_RESET_WAIT_TIME * 1000);
2861 i += 20) {
2862 status_reg =
2863 instance->instancet->
2864 read_fw_status_reg(
2865 instance->reg_set);
2866 abs_state = status_reg &
2867 MFI_STATE_MASK;
2868 if (abs_state == MFI_STATE_READY) {
2869 dev_warn(&instance->pdev->dev,
2870 						"SR-IOV: FW was found "
2871 "to be in ready state "
2872 "for scsi%d.\n",
2873 instance->host->host_no);
2874 break;
2875 }
2876 msleep(20);
2877 }
2878 if (abs_state != MFI_STATE_READY) {
2879 dev_warn(&instance->pdev->dev, "SR-IOV: "
2880 "FW not in ready state after %d"
2881 " seconds for scsi%d, status_reg = "
2882 "0x%x.\n",
2883 MEGASAS_RESET_WAIT_TIME,
2884 instance->host->host_no,
2885 status_reg);
2886 megaraid_sas_kill_hba(instance);
2887 instance->skip_heartbeat_timer_del = 1;
2888 instance->adprecovery =
2889 MEGASAS_HW_CRITICAL_ERROR;
2890 retval = FAILED;
2891 goto out;
2892 }
2893 }
2894 }
2895
2896 /* Now try to reset the chip */
2897 for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
2898
2899 if (instance->instancet->adp_reset
2900 (instance, instance->reg_set))
2901 continue;
2902
2903 /* Wait for FW to become ready */
2904 if (megasas_transition_to_ready(instance, 1)) {
2905 dev_warn(&instance->pdev->dev, "Failed to "
2906 "transition controller to ready "
2907 "for scsi%d.\n",
2908 instance->host->host_no);
2909 continue;
2910 }
2911
2912 megasas_reset_reply_desc(instance);
2913 if (megasas_ioc_init_fusion(instance)) {
2914 dev_warn(&instance->pdev->dev,
2915 "megasas_ioc_init_fusion() failed!"
2916 " for scsi%d\n",
2917 instance->host->host_no);
2918 continue;
2919 }
2920
2921 megasas_refire_mgmt_cmd(instance);
2922
2923 if (megasas_get_ctrl_info(instance)) {
2924 dev_info(&instance->pdev->dev,
2925 "Failed from %s %d\n",
2926 __func__, __LINE__);
2927 megaraid_sas_kill_hba(instance);
2928 retval = FAILED;
2929 }
2930 /* Reset load balance info */
2931 memset(fusion->load_balance_info, 0,
2932 sizeof(struct LD_LOAD_BALANCE_INFO)
2933 *MAX_LOGICAL_DRIVES_EXT);
2934
2935 if (!megasas_get_map_info(instance))
2936 megasas_sync_map_info(instance);
2937
2938 megasas_setup_jbod_map(instance);
2939
2940 clear_bit(MEGASAS_FUSION_IN_RESET,
2941 &instance->reset_flags);
2942 instance->instancet->enable_intr(instance);
2943 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2944
2945 /* Restart SR-IOV heartbeat */
2946 if (instance->requestorId) {
2947 if (!megasas_sriov_start_heartbeat(instance, 0))
2948 megasas_start_timer(instance,
2949 &instance->sriov_heartbeat_timer,
2950 megasas_sriov_heartbeat_handler,
2951 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2952 else
2953 instance->skip_heartbeat_timer_del = 1;
2954 }
2955
2956 /* Adapter reset completed successfully */
2957 dev_warn(&instance->pdev->dev, "Reset "
2958 "successful for scsi%d.\n",
2959 instance->host->host_no);
2960
2961 if (instance->crash_dump_drv_support &&
2962 instance->crash_dump_app_support)
2963 megasas_set_crash_dump_params(instance,
2964 MR_CRASH_BUF_TURN_ON);
2965 else
2966 megasas_set_crash_dump_params(instance,
2967 MR_CRASH_BUF_TURN_OFF);
2968
2969 retval = SUCCESS;
2970 goto out;
2971 }
2972 /* Reset failed, kill the adapter */
2973 dev_warn(&instance->pdev->dev, "Reset failed, killing "
2974 "adapter scsi%d.\n", instance->host->host_no);
2975 megaraid_sas_kill_hba(instance);
2976 instance->skip_heartbeat_timer_del = 1;
2977 retval = FAILED;
2978 } else {
2979 /* For VF: Restart HB timer if we didn't OCR */
2980 if (instance->requestorId) {
2981 megasas_start_timer(instance,
2982 &instance->sriov_heartbeat_timer,
2983 megasas_sriov_heartbeat_handler,
2984 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2985 }
2986 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2987 instance->instancet->enable_intr(instance);
2988 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2989 }
2990 out:
2991 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2992 mutex_unlock(&instance->reset_mutex);
2993 return retval;
2994 }
2995
2996 /* Fusion Crash dump collection work queue */
2997 void megasas_fusion_crash_dump_wq(struct work_struct *work)
2998 {
2999 struct megasas_instance *instance =
3000 container_of(work, struct megasas_instance, crash_init);
3001 u32 status_reg;
3002 u8 partial_copy = 0;
3003
3004
3005 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
3006
3007 /*
3008 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
3009 * to host crash buffers
3010 */
3011 if (instance->drv_buf_index == 0) {
3012 /* Buffer is already allocated for old Crash dump.
3013 * Do OCR and do not wait for crash dump collection
3014 */
3015 if (instance->drv_buf_alloc) {
3016 dev_info(&instance->pdev->dev, "earlier crash dump is "
3017 "not yet copied by application, ignoring this "
3018 "crash dump and initiating OCR\n");
3019 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
3020 writel(status_reg,
3021 &instance->reg_set->outbound_scratch_pad);
3022 readl(&instance->reg_set->outbound_scratch_pad);
3023 return;
3024 }
3025 megasas_alloc_host_crash_buffer(instance);
3026 dev_info(&instance->pdev->dev, "Number of host crash buffers "
3027 "allocated: %d\n", instance->drv_buf_alloc);
3028 }
3029
3030 /*
3031 	 * If the driver has already filled all the host buffers it was able
3032 	 * to allocate and FW still has more crash dump data, the remaining
3033 	 * data is ignored.
3034 */
3035 if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
3036 dev_info(&instance->pdev->dev, "Driver is done copying "
3037 "the buffer: %d\n", instance->drv_buf_alloc);
3038 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
3039 partial_copy = 1;
3040 } else {
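		/* Copy this DMA chunk into the next host buffer and clear
		 * DMADONE (assumption: this signals firmware to DMA the next
		 * chunk and raise the doorbell again).
		 */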
3041 memcpy(instance->crash_buf[instance->drv_buf_index],
3042 instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
3043 instance->drv_buf_index++;
3044 status_reg &= ~MFI_STATE_DMADONE;
3045 }
3046
3047 if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
3048 		dev_info(&instance->pdev->dev, "Crash Dump is available, number "
3049 "of copied buffers: %d\n", instance->drv_buf_index);
3050 instance->fw_crash_buffer_size = instance->drv_buf_index;
3051 instance->fw_crash_state = AVAILABLE;
3052 instance->drv_buf_index = 0;
3053 writel(status_reg, &instance->reg_set->outbound_scratch_pad);
3054 readl(&instance->reg_set->outbound_scratch_pad);
3055 if (!partial_copy)
3056 megasas_reset_fusion(instance->host, 0);
3057 } else {
3058 writel(status_reg, &instance->reg_set->outbound_scratch_pad);
3059 readl(&instance->reg_set->outbound_scratch_pad);
3060 }
3061 }
3062
3063
3064 /* Fusion OCR work queue */
3065 void megasas_fusion_ocr_wq(struct work_struct *work)
3066 {
3067 struct megasas_instance *instance =
3068 container_of(work, struct megasas_instance, work_init);
3069
3070 megasas_reset_fusion(instance->host, 0);
3071 }
3072
3073 struct megasas_instance_template megasas_instance_template_fusion = {
3074 .enable_intr = megasas_enable_intr_fusion,
3075 .disable_intr = megasas_disable_intr_fusion,
3076 .clear_intr = megasas_clear_intr_fusion,
3077 .read_fw_status_reg = megasas_read_fw_status_reg_fusion,
3078 .adp_reset = megasas_adp_reset_fusion,
3079 .check_reset = megasas_check_reset_fusion,
3080 .service_isr = megasas_isr_fusion,
3081 .tasklet = megasas_complete_cmd_dpc_fusion,
3082 .init_adapter = megasas_init_adapter_fusion,
3083 .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
3084 .issue_dcmd = megasas_issue_dcmd_fusion,
3085 };
3086