1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Driver for Broadcom MPI3 Storage Controllers
4 *
5 * Copyright (C) 2017-2023 Broadcom Inc.
6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7 *
8 */
9
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12
/* Forward declarations for routines referenced before their definitions */
static int
mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u16 reset_reason);
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd);
static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);

/* Requested number of io_uring poll queues; 0 disables poll mode */
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
24
#if defined(writeq) && defined(CONFIG_64BIT)
/*
 * mpi3mr_writeq - 64-bit MMIO register write helper.
 * The platform provides an atomic writeq(), so the lock is unused here.
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	writeq(b, addr);
}
#else
/*
 * Fallback for platforms without a native writeq(): emit the value as
 * two 32-bit writes (low dword first) under the lock, so the pair is
 * not interleaved with another CPU writing the same 64-bit register.
 */
static inline void mpi3mr_writeq(__u64 b, void __iomem *addr,
	spinlock_t *write_queue_lock)
{
	__u64 data_out = b;
	unsigned long flags;

	spin_lock_irqsave(write_queue_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(write_queue_lock, flags);
}
#endif
44
45 static inline bool
mpi3mr_check_req_qfull(struct op_req_qinfo * op_req_q)46 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
47 {
48 u16 pi, ci, max_entries;
49 bool is_qfull = false;
50
51 pi = op_req_q->pi;
52 ci = READ_ONCE(op_req_q->ci);
53 max_entries = op_req_q->num_requests;
54
55 if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
56 is_qfull = true;
57
58 return is_qfull;
59 }
60
mpi3mr_sync_irqs(struct mpi3mr_ioc * mrioc)61 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
62 {
63 u16 i, max_vectors;
64
65 max_vectors = mrioc->intr_info_count;
66
67 for (i = 0; i < max_vectors; i++)
68 synchronize_irq(pci_irq_vector(mrioc->pdev, i));
69 }
70
/**
 * mpi3mr_ioc_disable_intr - Disable interrupt handling
 * @mrioc: Adapter instance reference
 *
 * Clears the software interrupt-enabled flag checked by the ISRs and
 * then waits for any in-flight interrupt handlers to finish.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}
76
/**
 * mpi3mr_ioc_enable_intr - Enable interrupt handling
 * @mrioc: Adapter instance reference
 *
 * Sets the software flag consulted by the interrupt handlers; it does
 * not touch any hardware interrupt masking.
 */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}
81
mpi3mr_cleanup_isr(struct mpi3mr_ioc * mrioc)82 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
83 {
84 u16 i;
85
86 mpi3mr_ioc_disable_intr(mrioc);
87
88 if (!mrioc->intr_info)
89 return;
90
91 for (i = 0; i < mrioc->intr_info_count; i++)
92 free_irq(pci_irq_vector(mrioc->pdev, i),
93 (mrioc->intr_info + i));
94
95 kfree(mrioc->intr_info);
96 mrioc->intr_info = NULL;
97 mrioc->intr_info_count = 0;
98 mrioc->is_intr_info_set = false;
99 pci_free_irq_vectors(mrioc->pdev);
100 }
101
/*
 * mpi3mr_add_sg_single - fill one simple SGE at @paddr with the given
 * flags, byte length and DMA address (endian-converted for the IOC).
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sge = (struct mpi3_sge_common *)paddr;

	sge->flags = flags;
	sge->length = cpu_to_le32(length);
	sge->address = cpu_to_le64(dma_addr);
}
111
mpi3mr_build_zero_len_sge(void * paddr)112 void mpi3mr_build_zero_len_sge(void *paddr)
113 {
114 u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
115
116 mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
117 }
118
/*
 * mpi3mr_get_reply_virt_addr - translate a reply frame DMA address to
 * its host virtual address; NULL for a zero address or one outside
 * the reply buffer window.
 */
void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	dma_addr_t offset;

	if (!phys_addr || (phys_addr < mrioc->reply_buf_dma) ||
	    (phys_addr > mrioc->reply_buf_dma_max_address))
		return NULL;

	offset = phys_addr - mrioc->reply_buf_dma;
	return mrioc->reply_buf + offset;
}
131
/*
 * mpi3mr_get_sensebuf_virt_addr - translate a sense buffer DMA address
 * to its host virtual address; a zero address yields NULL.
 */
void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
	dma_addr_t phys_addr)
{
	if (!phys_addr)
		return NULL;

	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
}
140
/*
 * mpi3mr_repost_reply_buf - return a consumed reply frame to the
 * firmware's free queue and publish the advanced host index.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	unsigned long flags;
	u32 slot;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	slot = mrioc->reply_free_queue_host_index;
	/* Advance the host index with wrap-around */
	if (++mrioc->reply_free_queue_host_index == mrioc->reply_free_qsz)
		mrioc->reply_free_queue_host_index = 0;
	mrioc->reply_free_q[slot] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
158
/*
 * mpi3mr_repost_sense_buf - return a consumed sense buffer to the
 * firmware's free queue and publish the advanced host index.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	unsigned long flags;
	u32 slot;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	slot = mrioc->sbq_host_index;
	/* Advance the host index with wrap-around */
	if (++mrioc->sbq_host_index == mrioc->sense_buf_q_sz)
		mrioc->sbq_host_index = 0;
	mrioc->sense_buf_q[slot] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
175
/**
 * mpi3mr_print_event_data - Print received event description
 * @mrioc: Adapter instance reference
 * @event_reply: event notification reply frame
 *
 * Logs a human readable description of the received event when event
 * debug logging (MPI3_DEBUG_EVENT) is enabled. Events that carry a
 * decodable payload print their key fields and return early; all
 * other known events print only their name.
 *
 * Return: Nothing.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	/* Event logging is opt-in via the logging_level setting */
	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
		return;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
		desc = "Enclosure Added";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	case MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE:
		desc = "Diagnostic Buffer Status Change";
		break;
	}

	/* Unknown/unhandled events are silently ignored */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}
295
/**
 * mpi3mr_handle_events - Asynchronous event notification handler
 * @mrioc: Adapter instance reference
 * @def_reply: Event notification reply frame
 *
 * Caches the IOC change count carried by the event, optionally logs
 * the event, and forwards it to the OS-level event handler.
 *
 * Return: Nothing.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}
306
307 static struct mpi3mr_drv_cmd *
mpi3mr_get_drv_cmd(struct mpi3mr_ioc * mrioc,u16 host_tag,struct mpi3_default_reply * def_reply)308 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
309 struct mpi3_default_reply *def_reply)
310 {
311 u16 idx;
312
313 switch (host_tag) {
314 case MPI3MR_HOSTTAG_INITCMDS:
315 return &mrioc->init_cmds;
316 case MPI3MR_HOSTTAG_CFG_CMDS:
317 return &mrioc->cfg_cmds;
318 case MPI3MR_HOSTTAG_BSG_CMDS:
319 return &mrioc->bsg_cmds;
320 case MPI3MR_HOSTTAG_BLK_TMS:
321 return &mrioc->host_tm_cmds;
322 case MPI3MR_HOSTTAG_PEL_ABORT:
323 return &mrioc->pel_abort_cmd;
324 case MPI3MR_HOSTTAG_PEL_WAIT:
325 return &mrioc->pel_cmds;
326 case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
327 return &mrioc->transport_cmds;
328 case MPI3MR_HOSTTAG_INVALID:
329 if (def_reply && def_reply->function ==
330 MPI3_FUNCTION_EVENT_NOTIFICATION)
331 mpi3mr_handle_events(mrioc, def_reply);
332 return NULL;
333 default:
334 break;
335 }
336 if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
337 host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
338 idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
339 return &mrioc->dev_rmhs_cmds[idx];
340 }
341
342 if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
343 host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
344 idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
345 return &mrioc->evtack_cmds[idx];
346 }
347
348 return NULL;
349 }
350
/**
 * mpi3mr_process_admin_reply_desc - Process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: Admin reply descriptor to process
 * @reply_dma: Placeholder for the reply frame's DMA address; set for
 *             address-type descriptors, left 0 otherwise so the caller
 *             knows whether to repost a reply frame
 *
 * Decodes the descriptor type and extracts the host tag, IOC status
 * and log info (plus sense data for SCSI IO replies), then completes
 * or invokes the callback of the matching driver command tracker.
 *
 * Return: Nothing.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u16 masked_ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0, sense_count = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	struct scsi_sense_hdr sshdr;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		/* Log info is only valid when the descriptor flags it */
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Descriptor points at a full reply frame in host memory */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		masked_ioc_status = ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
			sense_count = le32_to_cpu(scsi_reply->sense_count);
			if (sense_buf) {
				scsi_normalize_sense(sense_buf, sense_count,
				    &sshdr);
				mpi3mr_scsisense_trigger(mrioc, sshdr.sense_key,
				    sshdr.asc, sshdr.ascq);
			}
		}
		mpi3mr_reply_trigger(mrioc, masked_ioc_status, ioc_loginfo);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			/* BSG consumers need the unmasked IOC status */
			if (host_tag == MPI3MR_HOSTTAG_BSG_CMDS)
				cmdptr->ioc_status = ioc_status;
			else
				cmdptr->ioc_status = masked_ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->reply_sz);
			}
			if (sense_buf && cmdptr->sensebuf) {
				cmdptr->is_sense = 1;
				memcpy(cmdptr->sensebuf, sense_buf,
				    MPI3MR_SENSE_BUF_SZ);
			}
			/* Wake a synchronous waiter, else run the callback */
			if (cmdptr->is_waiting) {
				cmdptr->is_waiting = 0;
				complete(&cmdptr->done);
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* Return the consumed sense buffer to the firmware's free pool */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
447
/**
 * mpi3mr_process_admin_reply_q - Admin reply queue handler
 * @mrioc: Adapter instance reference
 *
 * Drains the admin reply queue: processes each descriptor whose phase
 * bit matches the expected phase, reposts reply frames, and updates
 * the queue's consumer index register.
 *
 * Return: Number of admin replies processed, or 0 if the queue is
 * empty or is already being drained by another context.
 */
int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	u16 threshold_comps = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	/* Single-drainer guard: bail if another context holds the queue */
	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
		return 0;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase-bit mismatch means no new descriptor at CI */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&mrioc->admin_reply_q_in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		threshold_comps++;
		/* Wrap CI and flip the expected phase on rollover */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/* Periodically publish CI so the firmware can reuse slots */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(admin_reply_ci,
			    &mrioc->sysif_regs->admin_reply_queue_ci);
			threshold_comps = 0;
		}
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;
	atomic_dec(&mrioc->admin_reply_q_in_use);

	return num_admin_replies;
}
503
504 /**
505 * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
506 * queue's consumer index from operational reply descriptor queue.
507 * @op_reply_q: op_reply_qinfo object
508 * @reply_ci: operational reply descriptor's queue consumer index
509 *
510 * Returns: reply descriptor frame address
511 */
512 static inline struct mpi3_default_reply_descriptor *
mpi3mr_get_reply_desc(struct op_reply_qinfo * op_reply_q,u32 reply_ci)513 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
514 {
515 void *segment_base_addr;
516 struct segments *segments = op_reply_q->q_segments;
517 struct mpi3_default_reply_descriptor *reply_desc = NULL;
518
519 segment_base_addr =
520 segments[reply_ci / op_reply_q->segment_qd].segment;
521 reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
522 (reply_ci % op_reply_q->segment_qd);
523 return reply_desc;
524 }
525
526 /**
527 * mpi3mr_process_op_reply_q - Operational reply queue handler
528 * @mrioc: Adapter instance reference
529 * @op_reply_q: Operational reply queue info
530 *
531 * Checks the specific operational reply queue and drains the
532 * reply queue entries until the queue is empty and process the
533 * individual reply descriptors.
534 *
535 * Return: 0 if queue is already processed,or number of reply
536 * descriptors processed.
537 */
int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct op_reply_qinfo *op_reply_q)
{
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx, threshold_comps = 0;

	/* Queue IDs are 1-based; register/array indexes are 0-based */
	reply_qidx = op_reply_q->qid - 1;

	/* Single-drainer guard: only one context drains this queue */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	/* Phase-bit mismatch means no new descriptor at CI */
	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
			break;

		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* Credit back request-queue slots the firmware has consumed */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);

		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;
		threshold_comps++;

		/* Wrap CI and flip the expected phase on rollover */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
#ifndef CONFIG_PREEMPT_RT
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			op_reply_q->enable_irq_poll = true;
			break;
		}
#endif
		/* Periodically publish CI so the firmware can reuse slots */
		if (threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
			writel(reply_ci,
			    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
			atomic_sub(threshold_comps, &op_reply_q->pend_ios);
			threshold_comps = 0;
		}
	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;
	atomic_sub(threshold_comps, &op_reply_q->pend_ios);
	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}
616
617 /**
618 * mpi3mr_blk_mq_poll - Operational reply queue handler
619 * @shost: SCSI Host reference
620 * @queue_num: Request queue number (w.r.t OS it is hardware context number)
621 *
622 * Checks the specific operational reply queue and drains the
623 * reply queue entries until the queue is empty and process the
624 * individual reply descriptors.
625 *
626 * Return: 0 if queue is already processed,or number of reply
627 * descriptors processed.
628 */
mpi3mr_blk_mq_poll(struct Scsi_Host * shost,unsigned int queue_num)629 int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
630 {
631 int num_entries = 0;
632 struct mpi3mr_ioc *mrioc;
633
634 mrioc = (struct mpi3mr_ioc *)shost->hostdata;
635
636 if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
637 mrioc->unrecoverable || mrioc->pci_err_recovery))
638 return 0;
639
640 num_entries = mpi3mr_process_op_reply_q(mrioc,
641 &mrioc->op_reply_qinfo[queue_num]);
642
643 return num_entries;
644 }
645
mpi3mr_isr_primary(int irq,void * privdata)646 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
647 {
648 struct mpi3mr_intr_info *intr_info = privdata;
649 struct mpi3mr_ioc *mrioc;
650 u16 midx;
651 u32 num_admin_replies = 0, num_op_reply = 0;
652
653 if (!intr_info)
654 return IRQ_NONE;
655
656 mrioc = intr_info->mrioc;
657
658 if (!mrioc->intr_enabled)
659 return IRQ_NONE;
660
661 midx = intr_info->msix_index;
662
663 if (!midx)
664 num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
665 if (intr_info->op_reply_q)
666 num_op_reply = mpi3mr_process_op_reply_q(mrioc,
667 intr_info->op_reply_q);
668
669 if (num_admin_replies || num_op_reply)
670 return IRQ_HANDLED;
671 else
672 return IRQ_NONE;
673 }
674
675 #ifndef CONFIG_PREEMPT_RT
676
mpi3mr_isr(int irq,void * privdata)677 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
678 {
679 struct mpi3mr_intr_info *intr_info = privdata;
680 int ret;
681
682 if (!intr_info)
683 return IRQ_NONE;
684
685 /* Call primary ISR routine */
686 ret = mpi3mr_isr_primary(irq, privdata);
687
688 /*
689 * If more IOs are expected, schedule IRQ polling thread.
690 * Otherwise exit from ISR.
691 */
692 if (!intr_info->op_reply_q)
693 return ret;
694
695 if (!intr_info->op_reply_q->enable_irq_poll ||
696 !atomic_read(&intr_info->op_reply_q->pend_ios))
697 return ret;
698
699 disable_irq_nosync(intr_info->os_irq);
700
701 return IRQ_WAKE_THREAD;
702 }
703
704 /**
705 * mpi3mr_isr_poll - Reply queue polling routine
706 * @irq: IRQ
707 * @privdata: Interrupt info
708 *
709 * poll for pending I/O completions in a loop until pending I/Os
710 * present or controller queue depth I/Os are processed.
711 *
712 * Return: IRQ_NONE or IRQ_HANDLED
713 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled || mrioc->unrecoverable)
			break;

		/* MSI-x vector 0 also services the admin reply queue */
		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc,
				intr_info->op_reply_q);

		/* Short sleep between passes to let new completions land */
		usleep_range(MPI3MR_IRQ_POLL_SLEEP, MPI3MR_IRQ_POLL_SLEEP + 1);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	/* Re-arm the hard IRQ that mpi3mr_isr() disabled before waking us */
	intr_info->op_reply_q->enable_irq_poll = false;
	enable_irq(intr_info->os_irq);

	return IRQ_HANDLED;
}
749
750 #endif
751
752 /**
753 * mpi3mr_request_irq - Request IRQ and register ISR
754 * @mrioc: Adapter instance reference
755 * @index: IRQ vector index
756 *
757 * Request threaded ISR with primary ISR and secondary
758 *
759 * Return: 0 on success and non zero on failures.
760 */
static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
{
	struct pci_dev *pdev = mrioc->pdev;
	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
	int retval = 0;

	intr_info->mrioc = mrioc;
	intr_info->msix_index = index;
	/* Operational reply queue binding is assigned later, if any */
	intr_info->op_reply_q = NULL;

	/* Per-vector IRQ name, e.g. "<driver><id>-msix<index>" */
	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
	    mrioc->driver_name, mrioc->id, index);

#ifndef CONFIG_PREEMPT_RT
	/* Primary hard handler plus a threaded poll handler */
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
#else
	/* PREEMPT_RT: register only the primary handler, no poll thread */
	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
	    NULL, IRQF_SHARED, intr_info->name, intr_info);
#endif
	if (retval) {
		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
		    intr_info->name, pci_irq_vector(pdev, index));
		return retval;
	}

	intr_info->os_irq = pci_irq_vector(pdev, index);
	return retval;
}
790
/*
 * mpi3mr_calc_poll_queues - validate the requested poll queue count
 * against the available MSI-x vectors; zero the request when there
 * are not enough vectors left for the admin and default queues.
 */
static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
{
	if (!mrioc->requested_poll_qcount)
		return;

	/* Admin and default queues each need a dedicated vector */
	if (max_vectors <= 2 ||
	    mrioc->requested_poll_qcount >= (max_vectors - 2)) {
		ioc_info(mrioc,
		    "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
		    mrioc->requested_poll_qcount, max_vectors);
		mrioc->requested_poll_qcount = 0;
	} else {
		ioc_info(mrioc,
		    "enabled polled queues (%d) msix (%d)\n",
		    mrioc->requested_poll_qcount, max_vectors);
	}
}
809
810 /**
811 * mpi3mr_setup_isr - Setup ISR for the controller
812 * @mrioc: Adapter instance reference
813 * @setup_one: Request one IRQ or more
814 *
815 * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
816 *
817 * Return: 0 on success and non zero on failures.
818 */
static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
{
	unsigned int irq_flags = PCI_IRQ_MSIX;
	int max_vectors, min_vec;
	int retval;
	int i;
	struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };

	/* Vectors already allocated and registered - nothing to do */
	if (mrioc->is_intr_info_set)
		return 0;

	mpi3mr_cleanup_isr(mrioc);

	if (setup_one || reset_devices) {
		/* Minimal bring-up/kdump path: a single vector suffices */
		max_vectors = 1;
		retval = pci_alloc_irq_vectors(mrioc->pdev,
		    1, max_vectors, irq_flags);
		if (retval < 0) {
			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
			    retval);
			goto out_failed;
		}
	} else {
		/* One vector per CPU plus admin and poll queues, capped by HW */
		max_vectors =
		    min_t(int, mrioc->cpu_count + 1 +
			mrioc->requested_poll_qcount, mrioc->msix_count);

		mpi3mr_calc_poll_queues(mrioc, max_vectors);

		ioc_info(mrioc,
		    "MSI-X vectors supported: %d, no of cores: %d,",
		    mrioc->msix_count, mrioc->cpu_count);
		ioc_info(mrioc,
		    "MSI-x vectors requested: %d poll_queues %d\n",
		    max_vectors, mrioc->requested_poll_qcount);

		/* Poll queues go in post_vectors: no affinity spreading */
		desc.post_vectors = mrioc->requested_poll_qcount;
		min_vec = desc.pre_vectors + desc.post_vectors;
		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;

		retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
		    min_vec, max_vectors, irq_flags, &desc);

		if (retval < 0) {
			ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
			    retval);
			goto out_failed;
		}

		/*
		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
		 * between Admin queue and operational queue
		 */
		if (retval == min_vec) {
			mrioc->op_reply_q_offset = 0;
		} else if (retval != (max_vectors)) {
			ioc_info(mrioc,
			    "allocated vectors (%d) are less than configured (%d)\n",
			    retval, max_vectors);
		}

		max_vectors = retval;
		mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;

		/* Re-validate the poll queue count against what we got */
		mpi3mr_calc_poll_queues(mrioc, max_vectors);
	}

	/* kcalloc: zeroed and overflow-safe for array allocations */
	mrioc->intr_info = kcalloc(max_vectors,
	    sizeof(struct mpi3mr_intr_info), GFP_KERNEL);
	if (!mrioc->intr_info) {
		retval = -ENOMEM;
		pci_free_irq_vectors(mrioc->pdev);
		goto out_failed;
	}
	for (i = 0; i < max_vectors; i++) {
		retval = mpi3mr_request_irq(mrioc, i);
		if (retval) {
			/* Record how many IRQs to unwind in cleanup */
			mrioc->intr_info_count = i;
			goto out_failed;
		}
	}
	if (reset_devices || !setup_one)
		mrioc->is_intr_info_set = true;
	mrioc->intr_info_count = max_vectors;
	mpi3mr_ioc_enable_intr(mrioc);
	return 0;

out_failed:
	mpi3mr_cleanup_isr(mrioc);

	return retval;
}
913
/* Controller state to name mapper structure */
static const struct {
	enum mpi3mr_iocstate value;	/* controller state */
	char *name;			/* human readable name */
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
925
mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)926 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
927 {
928 int i;
929 char *name = NULL;
930
931 for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
932 if (mrioc_states[i].value == mrioc_state) {
933 name = mrioc_states[i].name;
934 break;
935 }
936 }
937 return name;
938 }
939
940 /* Reset reason to name mapper structure*/
941 static const struct {
942 enum mpi3mr_reset_reason value;
943 char *name;
944 } mpi3mr_reset_reason_codes[] = {
945 { MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
946 { MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
947 { MPI3MR_RESET_FROM_APP, "application invocation" },
948 { MPI3MR_RESET_FROM_EH_HOS, "error handling" },
949 { MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
950 { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
951 { MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
952 { MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
953 { MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
954 { MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
955 { MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
956 { MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
957 { MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
958 {
959 MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
960 "create request queue timeout"
961 },
962 {
963 MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
964 "create reply queue timeout"
965 },
966 { MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
967 { MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
968 { MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
969 { MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
970 {
971 MPI3MR_RESET_FROM_CIACTVRST_TIMER,
972 "component image activation timeout"
973 },
974 {
975 MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
976 "get package version timeout"
977 },
978 { MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
979 { MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
980 {
981 MPI3MR_RESET_FROM_DIAG_BUFFER_POST_TIMEOUT,
982 "diagnostic buffer post timeout"
983 },
984 {
985 MPI3MR_RESET_FROM_DIAG_BUFFER_RELEASE_TIMEOUT,
986 "diagnostic buffer release timeout"
987 },
988 { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
989 { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
990 { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
991 };
992
993 /**
994 * mpi3mr_reset_rc_name - get reset reason code name
995 * @reason_code: reset reason code value
996 *
997 * Map reset reason to an NULL terminated ASCII string
998 *
999 * Return: name corresponding to reset reason value or NULL.
1000 */
mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)1001 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
1002 {
1003 int i;
1004 char *name = NULL;
1005
1006 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
1007 if (mpi3mr_reset_reason_codes[i].value == reason_code) {
1008 name = mpi3mr_reset_reason_codes[i].name;
1009 break;
1010 }
1011 }
1012 return name;
1013 }
1014
/* Reset type to name mapper structure */
static const struct {
	u16 reset_type;		/* MPI3_SYSIF_HOST_DIAG_RESET_ACTION_* value */
	char *name;		/* human-readable reset type name */
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
1023
1024 /**
1025 * mpi3mr_reset_type_name - get reset type name
1026 * @reset_type: reset type value
1027 *
1028 * Map reset type to an NULL terminated ASCII string
1029 *
1030 * Return: name corresponding to reset type value or NULL.
1031 */
mpi3mr_reset_type_name(u16 reset_type)1032 static const char *mpi3mr_reset_type_name(u16 reset_type)
1033 {
1034 int i;
1035 char *name = NULL;
1036
1037 for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
1038 if (mpi3mr_reset_types[i].reset_type == reset_type) {
1039 name = mpi3mr_reset_types[i].name;
1040 break;
1041 }
1042 }
1043 return name;
1044 }
1045
1046 /**
1047 * mpi3mr_is_fault_recoverable - Read fault code and decide
1048 * whether the controller can be recoverable
1049 * @mrioc: Adapter instance reference
1050 * Return: true if fault is recoverable, false otherwise.
1051 */
mpi3mr_is_fault_recoverable(struct mpi3mr_ioc * mrioc)1052 static inline bool mpi3mr_is_fault_recoverable(struct mpi3mr_ioc *mrioc)
1053 {
1054 u32 fault;
1055
1056 fault = (readl(&mrioc->sysif_regs->fault) &
1057 MPI3_SYSIF_FAULT_CODE_MASK);
1058
1059 switch (fault) {
1060 case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
1061 case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
1062 ioc_warn(mrioc,
1063 "controller requires system power cycle, marking controller as unrecoverable\n");
1064 return false;
1065 case MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER:
1066 ioc_warn(mrioc,
1067 "controller faulted due to insufficient power,\n"
1068 " try by connecting it to a different slot\n");
1069 return false;
1070 default:
1071 break;
1072 }
1073 return true;
1074 }
1075
1076 /**
1077 * mpi3mr_print_fault_info - Display fault information
1078 * @mrioc: Adapter instance reference
1079 *
1080 * Display the controller fault information if there is a
1081 * controller fault.
1082 *
1083 * Return: Nothing.
1084 */
mpi3mr_print_fault_info(struct mpi3mr_ioc * mrioc)1085 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
1086 {
1087 u32 ioc_status, code, code1, code2, code3;
1088
1089 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1090
1091 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
1092 code = readl(&mrioc->sysif_regs->fault);
1093 code1 = readl(&mrioc->sysif_regs->fault_info[0]);
1094 code2 = readl(&mrioc->sysif_regs->fault_info[1]);
1095 code3 = readl(&mrioc->sysif_regs->fault_info[2]);
1096
1097 ioc_info(mrioc,
1098 "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
1099 code, code1, code2, code3);
1100 }
1101 }
1102
1103 /**
1104 * mpi3mr_get_iocstate - Get IOC State
1105 * @mrioc: Adapter instance reference
1106 *
1107 * Return a proper IOC state enum based on the IOC status and
1108 * IOC configuration and unrcoverable state of the controller.
1109 *
1110 * Return: Current IOC state.
1111 */
mpi3mr_get_iocstate(struct mpi3mr_ioc * mrioc)1112 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
1113 {
1114 u32 ioc_status, ioc_config;
1115 u8 ready, enabled;
1116
1117 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1118 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1119
1120 if (mrioc->unrecoverable)
1121 return MRIOC_STATE_UNRECOVERABLE;
1122 if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
1123 return MRIOC_STATE_FAULT;
1124
1125 ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
1126 enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
1127
1128 if (ready && enabled)
1129 return MRIOC_STATE_READY;
1130 if ((!ready) && (!enabled))
1131 return MRIOC_STATE_RESET;
1132 if ((!ready) && (enabled))
1133 return MRIOC_STATE_BECOMING_READY;
1134
1135 return MRIOC_STATE_RESET_REQUESTED;
1136 }
1137
1138 /**
1139 * mpi3mr_free_ioctl_dma_memory - free memory for ioctl dma
1140 * @mrioc: Adapter instance reference
1141 *
1142 * Free the DMA memory allocated for IOCTL handling purpose.
1143 *
1144 * Return: None
1145 */
mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1146 static void mpi3mr_free_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1147 {
1148 struct dma_memory_desc *mem_desc;
1149 u16 i;
1150
1151 if (!mrioc->ioctl_dma_pool)
1152 return;
1153
1154 for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1155 mem_desc = &mrioc->ioctl_sge[i];
1156 if (mem_desc->addr) {
1157 dma_pool_free(mrioc->ioctl_dma_pool,
1158 mem_desc->addr,
1159 mem_desc->dma_addr);
1160 mem_desc->addr = NULL;
1161 }
1162 }
1163 dma_pool_destroy(mrioc->ioctl_dma_pool);
1164 mrioc->ioctl_dma_pool = NULL;
1165 mem_desc = &mrioc->ioctl_chain_sge;
1166
1167 if (mem_desc->addr) {
1168 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1169 mem_desc->addr, mem_desc->dma_addr);
1170 mem_desc->addr = NULL;
1171 }
1172 mem_desc = &mrioc->ioctl_resp_sge;
1173 if (mem_desc->addr) {
1174 dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
1175 mem_desc->addr, mem_desc->dma_addr);
1176 mem_desc->addr = NULL;
1177 }
1178
1179 mrioc->ioctl_sges_allocated = false;
1180 }
1181
1182 /**
1183 * mpi3mr_alloc_ioctl_dma_memory - Alloc memory for ioctl dma
1184 * @mrioc: Adapter instance reference
1185 *
1186 * This function allocates dmaable memory required to handle the
1187 * application issued MPI3 IOCTL requests.
1188 *
1189 * Return: None
1190 */
mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc * mrioc)1191 static void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_ioc *mrioc)
1192
1193 {
1194 struct dma_memory_desc *mem_desc;
1195 u16 i;
1196
1197 mrioc->ioctl_dma_pool = dma_pool_create("ioctl dma pool",
1198 &mrioc->pdev->dev,
1199 MPI3MR_IOCTL_SGE_SIZE,
1200 MPI3MR_PAGE_SIZE_4K, 0);
1201
1202 if (!mrioc->ioctl_dma_pool) {
1203 ioc_err(mrioc, "ioctl_dma_pool: dma_pool_create failed\n");
1204 goto out_failed;
1205 }
1206
1207 for (i = 0; i < MPI3MR_NUM_IOCTL_SGE; i++) {
1208 mem_desc = &mrioc->ioctl_sge[i];
1209 mem_desc->size = MPI3MR_IOCTL_SGE_SIZE;
1210 mem_desc->addr = dma_pool_zalloc(mrioc->ioctl_dma_pool,
1211 GFP_KERNEL,
1212 &mem_desc->dma_addr);
1213 if (!mem_desc->addr)
1214 goto out_failed;
1215 }
1216
1217 mem_desc = &mrioc->ioctl_chain_sge;
1218 mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1219 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1220 mem_desc->size,
1221 &mem_desc->dma_addr,
1222 GFP_KERNEL);
1223 if (!mem_desc->addr)
1224 goto out_failed;
1225
1226 mem_desc = &mrioc->ioctl_resp_sge;
1227 mem_desc->size = MPI3MR_PAGE_SIZE_4K;
1228 mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
1229 mem_desc->size,
1230 &mem_desc->dma_addr,
1231 GFP_KERNEL);
1232 if (!mem_desc->addr)
1233 goto out_failed;
1234
1235 mrioc->ioctl_sges_allocated = true;
1236
1237 return;
1238 out_failed:
1239 ioc_warn(mrioc, "cannot allocate DMA memory for the mpt commands\n"
1240 "from the applications, application interface for MPT command is disabled\n");
1241 mpi3mr_free_ioctl_dma_memory(mrioc);
1242 }
1243
1244 /**
1245 * mpi3mr_clear_reset_history - clear reset history
1246 * @mrioc: Adapter instance reference
1247 *
1248 * Write the reset history bit in IOC status to clear the bit,
1249 * if it is already set.
1250 *
1251 * Return: Nothing.
1252 */
mpi3mr_clear_reset_history(struct mpi3mr_ioc * mrioc)1253 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
1254 {
1255 u32 ioc_status;
1256
1257 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1258 if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1259 writel(ioc_status, &mrioc->sysif_regs->ioc_status);
1260 }
1261
/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status, scratch_pad0;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Record OS type, IOC number and reset reason in scratchpad0 */
	scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
	    MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
	    (mrioc->facts.ioc_num <<
	    MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
	writel(scratch_pad0, &mrioc->sysif_regs->scratchpad[0]);
	/* Clearing the enable-IOC bit requests the message unit reset */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll in 100ms steps until reset history or a fault is observed */
	timeout = MPI3MR_MUR_TIMEOUT * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			break;
		}
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
			mpi3mr_print_fault_info(mrioc);
			break;
		}
		msleep(100);
	} while (--timeout);

	/*
	 * MUR succeeded only if it did not time out and the controller is
	 * neither ready, nor faulted, nor still enabled.
	 */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		retval = 0;

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}
1317
1318 /**
1319 * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
1320 * during reset/resume
1321 * @mrioc: Adapter instance reference
1322 *
1323 * Return: zero if the new IOCFacts parameters value is compatible with
1324 * older values else return -EPERM
1325 */
1326 static int
mpi3mr_revalidate_factsdata(struct mpi3mr_ioc * mrioc)1327 mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
1328 {
1329 unsigned long *removepend_bitmap;
1330
1331 if (mrioc->facts.reply_sz > mrioc->reply_sz) {
1332 ioc_err(mrioc,
1333 "cannot increase reply size from %d to %d\n",
1334 mrioc->reply_sz, mrioc->facts.reply_sz);
1335 return -EPERM;
1336 }
1337
1338 if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
1339 ioc_err(mrioc,
1340 "cannot reduce number of operational reply queues from %d to %d\n",
1341 mrioc->num_op_reply_q,
1342 mrioc->facts.max_op_reply_q);
1343 return -EPERM;
1344 }
1345
1346 if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
1347 ioc_err(mrioc,
1348 "cannot reduce number of operational request queues from %d to %d\n",
1349 mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
1350 return -EPERM;
1351 }
1352
1353 if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
1354 ioc_err(mrioc, "Warning: The maximum data transfer length\n"
1355 "\tchanged after reset: previous(%d), new(%d),\n"
1356 "the driver cannot change this at run time\n",
1357 mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
1358
1359 if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
1360 MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED))
1361 ioc_err(mrioc,
1362 "critical error: multipath capability is enabled at the\n"
1363 "\tcontroller while sas transport support is enabled at the\n"
1364 "\tdriver, please reboot the system or reload the driver\n");
1365
1366 if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
1367 removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
1368 GFP_KERNEL);
1369 if (!removepend_bitmap) {
1370 ioc_err(mrioc,
1371 "failed to increase removepend_bitmap bits from %d to %d\n",
1372 mrioc->dev_handle_bitmap_bits,
1373 mrioc->facts.max_devhandle);
1374 return -EPERM;
1375 }
1376 bitmap_free(mrioc->removepend_bitmap);
1377 mrioc->removepend_bitmap = removepend_bitmap;
1378 ioc_info(mrioc,
1379 "increased bits of dev_handle_bitmap from %d to %d\n",
1380 mrioc->dev_handle_bitmap_bits,
1381 mrioc->facts.max_devhandle);
1382 mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
1383 }
1384
1385 return 0;
1386 }
1387
1388 /**
1389 * mpi3mr_bring_ioc_ready - Bring controller to ready state
1390 * @mrioc: Adapter instance reference
1391 *
1392 * Set Enable IOC bit in IOC configuration register and wait for
1393 * the controller to become ready.
1394 *
1395 * Return: 0 on success, appropriate error on failure.
1396 */
mpi3mr_bring_ioc_ready(struct mpi3mr_ioc * mrioc)1397 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1398 {
1399 u32 ioc_config, ioc_status, timeout, host_diagnostic;
1400 int retval = 0;
1401 enum mpi3mr_iocstate ioc_state;
1402 u64 base_info;
1403 u8 retry = 0;
1404 u64 start_time, elapsed_time_sec;
1405
1406 retry_bring_ioc_ready:
1407
1408 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1409 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1410 base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1411 ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1412 ioc_status, ioc_config, base_info);
1413
1414 if (!mpi3mr_is_fault_recoverable(mrioc)) {
1415 mrioc->unrecoverable = 1;
1416 goto out_device_not_present;
1417 }
1418
1419 /*The timeout value is in 2sec unit, changing it to seconds*/
1420 mrioc->ready_timeout =
1421 ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1422 MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1423
1424 ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1425
1426 ioc_state = mpi3mr_get_iocstate(mrioc);
1427 ioc_info(mrioc, "controller is in %s state during detection\n",
1428 mpi3mr_iocstate_name(ioc_state));
1429
1430 timeout = mrioc->ready_timeout * 10;
1431
1432 do {
1433 ioc_state = mpi3mr_get_iocstate(mrioc);
1434
1435 if (ioc_state != MRIOC_STATE_BECOMING_READY &&
1436 ioc_state != MRIOC_STATE_RESET_REQUESTED)
1437 break;
1438
1439 if (!pci_device_is_present(mrioc->pdev)) {
1440 mrioc->unrecoverable = 1;
1441 ioc_err(mrioc, "controller is not present while waiting to reset\n");
1442 goto out_device_not_present;
1443 }
1444
1445 msleep(100);
1446 } while (--timeout);
1447
1448 if (ioc_state == MRIOC_STATE_READY) {
1449 ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1450 retval = mpi3mr_issue_and_process_mur(mrioc,
1451 MPI3MR_RESET_FROM_BRINGUP);
1452 ioc_state = mpi3mr_get_iocstate(mrioc);
1453 if (retval)
1454 ioc_err(mrioc,
1455 "message unit reset failed with error %d current state %s\n",
1456 retval, mpi3mr_iocstate_name(ioc_state));
1457 }
1458 if (ioc_state != MRIOC_STATE_RESET) {
1459 if (ioc_state == MRIOC_STATE_FAULT) {
1460 timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
1461 mpi3mr_print_fault_info(mrioc);
1462 do {
1463 host_diagnostic =
1464 readl(&mrioc->sysif_regs->host_diagnostic);
1465 if (!(host_diagnostic &
1466 MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
1467 break;
1468 if (!pci_device_is_present(mrioc->pdev)) {
1469 mrioc->unrecoverable = 1;
1470 ioc_err(mrioc, "controller is not present at the bringup\n");
1471 goto out_device_not_present;
1472 }
1473 msleep(100);
1474 } while (--timeout);
1475 }
1476 mpi3mr_print_fault_info(mrioc);
1477 ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1478 retval = mpi3mr_issue_reset(mrioc,
1479 MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1480 MPI3MR_RESET_FROM_BRINGUP);
1481 if (retval) {
1482 ioc_err(mrioc,
1483 "soft reset failed with error %d\n", retval);
1484 goto out_failed;
1485 }
1486 }
1487 ioc_state = mpi3mr_get_iocstate(mrioc);
1488 if (ioc_state != MRIOC_STATE_RESET) {
1489 ioc_err(mrioc,
1490 "cannot bring controller to reset state, current state: %s\n",
1491 mpi3mr_iocstate_name(ioc_state));
1492 goto out_failed;
1493 }
1494 mpi3mr_clear_reset_history(mrioc);
1495 retval = mpi3mr_setup_admin_qpair(mrioc);
1496 if (retval) {
1497 ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1498 retval);
1499 goto out_failed;
1500 }
1501
1502 ioc_info(mrioc, "bringing controller to ready state\n");
1503 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1504 ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1505 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1506
1507 if (retry == 0)
1508 start_time = jiffies;
1509
1510 timeout = mrioc->ready_timeout * 10;
1511 do {
1512 ioc_state = mpi3mr_get_iocstate(mrioc);
1513 if (ioc_state == MRIOC_STATE_READY) {
1514 ioc_info(mrioc,
1515 "successfully transitioned to %s state\n",
1516 mpi3mr_iocstate_name(ioc_state));
1517 return 0;
1518 }
1519 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1520 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
1521 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
1522 mpi3mr_print_fault_info(mrioc);
1523 goto out_failed;
1524 }
1525 if (!pci_device_is_present(mrioc->pdev)) {
1526 mrioc->unrecoverable = 1;
1527 ioc_err(mrioc,
1528 "controller is not present at the bringup\n");
1529 retval = -1;
1530 goto out_device_not_present;
1531 }
1532 msleep(100);
1533 elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1534 } while (elapsed_time_sec < mrioc->ready_timeout);
1535
1536 out_failed:
1537 elapsed_time_sec = jiffies_to_msecs(jiffies - start_time)/1000;
1538 if ((retry < 2) && (elapsed_time_sec < (mrioc->ready_timeout - 60))) {
1539 retry++;
1540
1541 ioc_warn(mrioc, "retrying to bring IOC ready, retry_count:%d\n"
1542 " elapsed time =%llu\n", retry, elapsed_time_sec);
1543
1544 goto retry_bring_ioc_ready;
1545 }
1546 ioc_state = mpi3mr_get_iocstate(mrioc);
1547 ioc_err(mrioc,
1548 "failed to bring to ready state, current state: %s\n",
1549 mpi3mr_iocstate_name(ioc_state));
1550 out_device_not_present:
1551 return retval;
1552 }
1553
1554 /**
1555 * mpi3mr_soft_reset_success - Check softreset is success or not
1556 * @ioc_status: IOC status register value
1557 * @ioc_config: IOC config register value
1558 *
1559 * Check whether the soft reset is successful or not based on
1560 * IOC status and IOC config register values.
1561 *
1562 * Return: True when the soft reset is success, false otherwise.
1563 */
1564 static inline bool
mpi3mr_soft_reset_success(u32 ioc_status,u32 ioc_config)1565 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1566 {
1567 if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1568 (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1569 return true;
1570 return false;
1571 }
1572
1573 /**
1574 * mpi3mr_diagfault_success - Check diag fault is success or not
1575 * @mrioc: Adapter reference
1576 * @ioc_status: IOC status register value
1577 *
1578 * Check whether the controller hit diag reset fault code.
1579 *
1580 * Return: True when there is diag fault, false otherwise.
1581 */
mpi3mr_diagfault_success(struct mpi3mr_ioc * mrioc,u32 ioc_status)1582 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1583 u32 ioc_status)
1584 {
1585 u32 fault;
1586
1587 if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1588 return false;
1589 fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1590 if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1591 mpi3mr_print_fault_info(mrioc);
1592 return true;
1593 }
1594 return false;
1595 }
1596
1597 /**
1598 * mpi3mr_set_diagsave - Set diag save bit for snapdump
1599 * @mrioc: Adapter reference
1600 *
1601 * Set diag save bit in IOC configuration register to enable
1602 * snapdump.
1603 *
1604 * Return: Nothing.
1605 */
mpi3mr_set_diagsave(struct mpi3mr_ioc * mrioc)1606 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1607 {
1608 u32 ioc_config;
1609
1610 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1611 ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1612 writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1613 }
1614
1615 /**
1616 * mpi3mr_issue_reset - Issue reset to the controller
1617 * @mrioc: Adapter reference
1618 * @reset_type: Reset type
1619 * @reset_reason: Reset reason code
1620 *
1621 * Unlock the host diagnostic registers and write the specific
1622 * reset type to that, wait for reset acknowledgment from the
1623 * controller, if the reset is not successful retry for the
1624 * predefined number of times.
1625 *
1626 * Return: 0 on success, non-zero on failure.
1627 */
mpi3mr_issue_reset(struct mpi3mr_ioc * mrioc,u16 reset_type,u16 reset_reason)1628 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1629 u16 reset_reason)
1630 {
1631 int retval = -1;
1632 u8 unlock_retry_count = 0;
1633 u32 host_diagnostic, ioc_status, ioc_config, scratch_pad0;
1634 u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1635
1636 if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1637 (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1638 return retval;
1639 if (mrioc->unrecoverable)
1640 return retval;
1641 if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1642 retval = 0;
1643 return retval;
1644 }
1645
1646 ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1647 mpi3mr_reset_type_name(reset_type),
1648 mpi3mr_reset_rc_name(reset_reason), reset_reason);
1649
1650 mpi3mr_clear_reset_history(mrioc);
1651 do {
1652 ioc_info(mrioc,
1653 "Write magic sequence to unlock host diag register (retry=%d)\n",
1654 ++unlock_retry_count);
1655 if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1656 ioc_err(mrioc,
1657 "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1658 mpi3mr_reset_type_name(reset_type),
1659 host_diagnostic);
1660 mrioc->unrecoverable = 1;
1661 return retval;
1662 }
1663
1664 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1665 &mrioc->sysif_regs->write_sequence);
1666 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1667 &mrioc->sysif_regs->write_sequence);
1668 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1669 &mrioc->sysif_regs->write_sequence);
1670 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1671 &mrioc->sysif_regs->write_sequence);
1672 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1673 &mrioc->sysif_regs->write_sequence);
1674 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1675 &mrioc->sysif_regs->write_sequence);
1676 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1677 &mrioc->sysif_regs->write_sequence);
1678 usleep_range(1000, 1100);
1679 host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1680 ioc_info(mrioc,
1681 "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1682 unlock_retry_count, host_diagnostic);
1683 } while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1684
1685 scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_LINUX <<
1686 MPI3MR_RESET_REASON_OSTYPE_SHIFT) | (mrioc->facts.ioc_num <<
1687 MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
1688 writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1689 writel(host_diagnostic | reset_type,
1690 &mrioc->sysif_regs->host_diagnostic);
1691 switch (reset_type) {
1692 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1693 do {
1694 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1695 ioc_config =
1696 readl(&mrioc->sysif_regs->ioc_configuration);
1697 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1698 && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1699 ) {
1700 mpi3mr_clear_reset_history(mrioc);
1701 retval = 0;
1702 break;
1703 }
1704 msleep(100);
1705 } while (--timeout);
1706 mpi3mr_print_fault_info(mrioc);
1707 break;
1708 case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1709 do {
1710 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1711 if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1712 retval = 0;
1713 break;
1714 }
1715 msleep(100);
1716 } while (--timeout);
1717 break;
1718 default:
1719 break;
1720 }
1721
1722 writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1723 &mrioc->sysif_regs->write_sequence);
1724
1725 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1726 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1727 ioc_info(mrioc,
1728 "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
1729 (!retval)?"successful":"failed", ioc_status,
1730 ioc_config);
1731 if (retval)
1732 mrioc->unrecoverable = 1;
1733 return retval;
1734 }
1735
/**
 * mpi3mr_admin_request_post - Post request to admin queue
 * @mrioc: Adapter reference
 * @admin_req: MPI3 request
 * @admin_req_sz: Request size
 * @ignore_reset: Ignore reset in process
 *
 * Post the MPI3 request into admin request queue and
 * inform the controller, if the queue is full return
 * appropriate error.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
	u16 admin_req_sz, u8 ignore_reset)
{
	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
	int retval = 0;
	unsigned long flags;
	u8 *areq_entry;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
		return -EFAULT;
	}

	/* The lock serializes all producer-index updates on the admin queue */
	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
	areq_pi = mrioc->admin_req_pi;
	areq_ci = mrioc->admin_req_ci;
	max_entries = mrioc->num_admin_req;
	/* Full when advancing PI (with wrap) would land on CI */
	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
	    (areq_pi == (max_entries - 1)))) {
		ioc_err(mrioc, "AdminReqQ full condition detected\n");
		retval = -EAGAIN;
		goto out;
	}
	if (!ignore_reset && mrioc->reset_in_progress) {
		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}
	if (mrioc->pci_err_recovery) {
		ioc_err(mrioc, "admin request queue submission failed due to pci error recovery in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Zero the slot then copy the (possibly shorter) request into it */
	areq_entry = (u8 *)mrioc->admin_req_base +
	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);

	/* Advance the producer index with wrap-around */
	if (++areq_pi == max_entries)
		areq_pi = 0;
	mrioc->admin_req_pi = areq_pi;

	/* Inform the controller of the new producer index (doorbell) */
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);

out:
	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);

	return retval;
}
1799
1800 /**
1801 * mpi3mr_free_op_req_q_segments - free request memory segments
1802 * @mrioc: Adapter instance reference
1803 * @q_idx: operational request queue index
1804 *
1805 * Free memory segments allocated for operational request queue
1806 *
1807 * Return: Nothing.
1808 */
mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc * mrioc,u16 q_idx)1809 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1810 {
1811 u16 j;
1812 int size;
1813 struct segments *segments;
1814
1815 segments = mrioc->req_qinfo[q_idx].q_segments;
1816 if (!segments)
1817 return;
1818
1819 if (mrioc->enable_segqueue) {
1820 size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1821 if (mrioc->req_qinfo[q_idx].q_segment_list) {
1822 dma_free_coherent(&mrioc->pdev->dev,
1823 MPI3MR_MAX_SEG_LIST_SIZE,
1824 mrioc->req_qinfo[q_idx].q_segment_list,
1825 mrioc->req_qinfo[q_idx].q_segment_list_dma);
1826 mrioc->req_qinfo[q_idx].q_segment_list = NULL;
1827 }
1828 } else
1829 size = mrioc->req_qinfo[q_idx].segment_qd *
1830 mrioc->facts.op_req_sz;
1831
1832 for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1833 if (!segments[j].segment)
1834 continue;
1835 dma_free_coherent(&mrioc->pdev->dev,
1836 size, segments[j].segment, segments[j].segment_dma);
1837 segments[j].segment = NULL;
1838 }
1839 kfree(mrioc->req_qinfo[q_idx].q_segments);
1840 mrioc->req_qinfo[q_idx].q_segments = NULL;
1841 mrioc->req_qinfo[q_idx].qid = 0;
1842 }
1843
1844 /**
1845 * mpi3mr_free_op_reply_q_segments - free reply memory segments
1846 * @mrioc: Adapter instance reference
1847 * @q_idx: operational reply queue index
1848 *
1849 * Free memory segments allocated for operational reply queue
1850 *
1851 * Return: Nothing.
1852 */
mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc * mrioc,u16 q_idx)1853 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1854 {
1855 u16 j;
1856 int size;
1857 struct segments *segments;
1858
1859 segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1860 if (!segments)
1861 return;
1862
1863 if (mrioc->enable_segqueue) {
1864 size = MPI3MR_OP_REP_Q_SEG_SIZE;
1865 if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1866 dma_free_coherent(&mrioc->pdev->dev,
1867 MPI3MR_MAX_SEG_LIST_SIZE,
1868 mrioc->op_reply_qinfo[q_idx].q_segment_list,
1869 mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1870 mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1871 }
1872 } else
1873 size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1874 mrioc->op_reply_desc_sz;
1875
1876 for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1877 if (!segments[j].segment)
1878 continue;
1879 dma_free_coherent(&mrioc->pdev->dev,
1880 size, segments[j].segment, segments[j].segment_dma);
1881 segments[j].segment = NULL;
1882 }
1883
1884 kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1885 mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1886 mrioc->op_reply_qinfo[q_idx].qid = 0;
1887 }
1888
/**
 * mpi3mr_delete_op_reply_q - delete operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Delete operational reply queue by issuing MPI request
 * through admin queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* qid 0 means the queue was never created (or already deleted) */
	if (!reply_qid) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	/* Maintain the per-type active queue accounting */
	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
	    mrioc->active_poll_qcount--;

	memset(&delq_req, 0, sizeof(delq_req));
	/* init_cmds is a single-slot command tracker; serialize via mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	/* Wait for the firmware reply; timeout escalates to a reset */
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "delete reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Detach the queue from its MSI-x vector before freeing memory */
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
1969
1970 /**
1971 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
1972 * @mrioc: Adapter instance reference
1973 * @qidx: request queue index
1974 *
1975 * Allocate segmented memory pools for operational reply
1976 * queue.
1977 *
1978 * Return: 0 on success, non-zero on failure.
1979 */
static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *op_reply_q = &mrioc->op_reply_qinfo[qidx];
	u64 *seg_list = NULL;
	struct segments *seg_arr;
	int seg_sz, seg;

	if (mrioc->enable_segqueue) {
		/*
		 * Segmented mode: fixed-size segments, entries-per-segment
		 * derived from the reply descriptor size, plus a DMA-visible
		 * list of segment base addresses for the controller.
		 */
		op_reply_q->segment_qd =
		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;

		seg_sz = MPI3MR_OP_REP_Q_SEG_SIZE;

		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_reply_q->q_segment_list)
			return -ENOMEM;
		seg_list = (u64 *)op_reply_q->q_segment_list;
	} else {
		/* Non-segmented: one contiguous buffer holds the whole queue */
		op_reply_q->segment_qd = op_reply_q->num_replies;
		seg_sz = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
	}

	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
	    op_reply_q->segment_qd);

	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_reply_q->q_segments)
		return -ENOMEM;

	/*
	 * Partial failure is handled by the caller, which frees whatever
	 * segments were successfully allocated.
	 */
	seg_arr = op_reply_q->q_segments;
	for (seg = 0; seg < op_reply_q->num_segments; seg++) {
		seg_arr[seg].segment = dma_alloc_coherent(&mrioc->pdev->dev,
		    seg_sz, &seg_arr[seg].segment_dma, GFP_KERNEL);
		if (!seg_arr[seg].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			seg_list[seg] =
			    (unsigned long)seg_arr[seg].segment_dma;
	}

	return 0;
}
2026
2027 /**
2028 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
2029 * @mrioc: Adapter instance reference
2030 * @qidx: request queue index
2031 *
2032 * Allocate segmented memory pools for operational request
2033 * queue.
2034 *
2035 * Return: 0 on success, non-zero on failure.
2036 */
static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *op_req_q = &mrioc->req_qinfo[qidx];
	u64 *seg_list = NULL;
	struct segments *seg_arr;
	int seg_sz, seg;

	if (mrioc->enable_segqueue) {
		/*
		 * Segmented mode: fixed-size segments sized by the request
		 * frame size, plus a DMA-visible list of segment addresses
		 * handed to the controller at queue-create time.
		 */
		op_req_q->segment_qd =
		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;

		seg_sz = MPI3MR_OP_REQ_Q_SEG_SIZE;

		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_req_q->q_segment_list)
			return -ENOMEM;
		seg_list = (u64 *)op_req_q->q_segment_list;

	} else {
		/* Non-segmented: whole queue lives in a single buffer */
		op_req_q->segment_qd = op_req_q->num_requests;
		seg_sz = op_req_q->num_requests * mrioc->facts.op_req_sz;
	}

	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
	    op_req_q->segment_qd);

	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_req_q->q_segments)
		return -ENOMEM;

	/*
	 * On partial failure the caller is responsible for freeing the
	 * segments that did get allocated.
	 */
	seg_arr = op_req_q->q_segments;
	for (seg = 0; seg < op_req_q->num_segments; seg++) {
		seg_arr[seg].segment = dma_alloc_coherent(&mrioc->pdev->dev,
		    seg_sz, &seg_arr[seg].segment_dma, GFP_KERNEL);
		if (!seg_arr[seg].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			seg_list[seg] =
			    (unsigned long)seg_arr[seg].segment_dma;
	}

	return 0;
}
2084
2085 /**
2086 * mpi3mr_create_op_reply_q - create operational reply queue
2087 * @mrioc: Adapter instance reference
2088 * @qidx: operational reply queue index
2089 *
 * Create operational reply queue by issuing MPI request
2091 * through admin queue.
2092 *
2093 * Return: 0 on success, non-zero on failure.
2094 */
static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_create_reply_queue_request create_req;
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = op_reply_q->qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* A non-zero qid means this queue already exists */
	if (reply_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
		    reply_qid);

		return retval;
	}

	/* Queue IDs are 1-based on the wire */
	reply_qid = qidx + 1;

	/* Queue depth depends on the controller device/revision */
	if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
		if (mrioc->pdev->revision)
			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
		else
			op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
	} else
		op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;

	op_reply_q->ci = 0;
	op_reply_q->ephase = 1;
	atomic_set(&op_reply_q->pend_ios, 0);
	atomic_set(&op_reply_q->in_use, 0);
	op_reply_q->enable_irq_poll = false;
	/* Pending-IO level above which submissions get pushed back */
	op_reply_q->qfull_watermark =
	    op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);

	if (!op_reply_q->q_segments) {
		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
		if (retval) {
			/* Release any partially allocated segments */
			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
	create_req.queue_id = cpu_to_le16(reply_qid);

	/* The trailing vectors are reserved for io_uring poll queues */
	if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
		op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
	else
		op_reply_q->qtype = MPI3MR_POLL_QUEUE;

	if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
		create_req.flags =
			MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
		create_req.msix_index =
			cpu_to_le16(mrioc->intr_info[midx].msix_index);
	} else {
		/* Poll queues share the last vector with its interrupt off */
		create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
		ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
			reply_qid, midx);
		if (!mrioc->active_poll_qcount)
			disable_irq_nosync(pci_irq_vector(mrioc->pdev,
			    mrioc->intr_info_count - 1));
	}

	if (mrioc->enable_segqueue) {
		/*
		 * This is a reply-queue create request, so use the
		 * reply-queue segmented flag (the request-queue macro was
		 * used here previously).
		 */
		create_req.flags |=
		    MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_reply_q->q_segments[0].segment_dma);

	create_req.size = cpu_to_le16(op_reply_q->num_replies);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create reply queue timed out\n");
		/* On timeout, save snapdump state and fault the controller */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Success: publish the qid and hook the queue to its vector */
	op_reply_q->qid = reply_qid;
	if (midx < mrioc->intr_info_count)
		mrioc->intr_info[midx].op_reply_q = op_reply_q;

	(op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
	    mrioc->active_poll_qcount++;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2223
2224 /**
2225 * mpi3mr_create_op_req_q - create operational request queue
2226 * @mrioc: Adapter instance reference
2227 * @idx: operational request queue index
2228 * @reply_qid: Reply queue ID
2229 *
 * Create operational request queue by issuing MPI request
2231 * through admin queue.
2232 *
2233 * Return: 0 on success, non-zero on failure.
2234 */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	/* A non-zero qid means this queue already exists */
	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Queue IDs are 1-based on the wire */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	/* Bind this request queue to its paired reply queue */
	op_req_q->reply_qid = reply_qid;
	spin_lock_init(&op_req_q->q_lock);

	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			/* Release any partially allocated segments */
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	if (mrioc->enable_segqueue) {
		/* Segmented queue: hand the controller the segment list */
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "create request queue timed out\n");
		/* On timeout, save snapdump state and fault the controller */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Success: publish the qid so the queue becomes usable */
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
2326
2327 /**
2328 * mpi3mr_create_op_queues - create operational queue pairs
2329 * @mrioc: Adapter instance reference
2330 *
2331 * Allocate memory for operational queue meta data and call
2332 * create request and reply queue functions.
2333 *
2334 * Return: 0 on success, non-zero on failures.
2335 */
mpi3mr_create_op_queues(struct mpi3mr_ioc * mrioc)2336 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
2337 {
2338 int retval = 0;
2339 u16 num_queues = 0, i = 0, msix_count_op_q = 1;
2340
2341 num_queues = min_t(int, mrioc->facts.max_op_reply_q,
2342 mrioc->facts.max_op_req_q);
2343
2344 msix_count_op_q =
2345 mrioc->intr_info_count - mrioc->op_reply_q_offset;
2346 if (!mrioc->num_queues)
2347 mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
2348 /*
2349 * During reset set the num_queues to the number of queues
2350 * that was set before the reset.
2351 */
2352 num_queues = mrioc->num_op_reply_q ?
2353 mrioc->num_op_reply_q : mrioc->num_queues;
2354 ioc_info(mrioc, "trying to create %d operational queue pairs\n",
2355 num_queues);
2356
2357 if (!mrioc->req_qinfo) {
2358 mrioc->req_qinfo = kcalloc(num_queues,
2359 sizeof(struct op_req_qinfo), GFP_KERNEL);
2360 if (!mrioc->req_qinfo) {
2361 retval = -1;
2362 goto out_failed;
2363 }
2364
2365 mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
2366 num_queues, GFP_KERNEL);
2367 if (!mrioc->op_reply_qinfo) {
2368 retval = -1;
2369 goto out_failed;
2370 }
2371 }
2372
2373 if (mrioc->enable_segqueue)
2374 ioc_info(mrioc,
2375 "allocating operational queues through segmented queues\n");
2376
2377 for (i = 0; i < num_queues; i++) {
2378 if (mpi3mr_create_op_reply_q(mrioc, i)) {
2379 ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
2380 break;
2381 }
2382 if (mpi3mr_create_op_req_q(mrioc, i,
2383 mrioc->op_reply_qinfo[i].qid)) {
2384 ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
2385 mpi3mr_delete_op_reply_q(mrioc, i);
2386 break;
2387 }
2388 }
2389
2390 if (i == 0) {
2391 /* Not even one queue is created successfully*/
2392 retval = -1;
2393 goto out_failed;
2394 }
2395 mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
2396 ioc_info(mrioc,
2397 "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
2398 mrioc->num_op_reply_q, mrioc->default_qcount,
2399 mrioc->active_poll_qcount);
2400
2401 return retval;
2402 out_failed:
2403 kfree(mrioc->req_qinfo);
2404 mrioc->req_qinfo = NULL;
2405
2406 kfree(mrioc->op_reply_qinfo);
2407 mrioc->op_reply_qinfo = NULL;
2408
2409 return retval;
2410 }
2411
2412 /**
2413 * mpi3mr_op_request_post - Post request to operational queue
2414 * @mrioc: Adapter reference
2415 * @op_req_q: Operational request queue info
2416 * @req: MPI3 request
2417 *
2418 * Post the MPI3 request into operational request queue and
2419 * inform the controller, if the queue is full return
2420 * appropriate error.
2421 *
2422 * Return: 0 on success, non-zero on failure.
2423 */
int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
	struct op_req_qinfo *op_req_q, u8 *req)
{
	u16 pi = 0, max_entries, reply_qidx = 0, midx;
	int retval = 0;
	unsigned long flags;
	u8 *req_entry;
	void *segment_base_addr;
	u16 req_sz = mrioc->facts.op_req_sz;
	struct segments *segments = op_req_q->q_segments;
	struct op_reply_qinfo *op_reply_q = NULL;

	/* Request queue N is paired with reply queue N (qids are 1-based) */
	reply_qidx = op_req_q->reply_qid - 1;
	op_reply_q = mrioc->op_reply_qinfo + reply_qidx;

	if (mrioc->unrecoverable)
		return -EFAULT;

	/* q_lock serializes producers on this request queue */
	spin_lock_irqsave(&op_req_q->q_lock, flags);
	pi = op_req_q->pi;
	max_entries = op_req_q->num_requests;

	if (mpi3mr_check_req_qfull(op_req_q)) {
		/* Queue full: drain the paired reply queue once, re-check */
		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
			reply_qidx, mrioc->op_reply_q_offset);
		mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			retval = -EAGAIN;
			goto out;
		}
	}

	if (mrioc->reset_in_progress) {
		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}
	if (mrioc->pci_err_recovery) {
		ioc_err(mrioc, "operational request queue submission failed due to pci error recovery in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Reply queue is nearing to get full, push back IOs to SML */
	if ((mrioc->prevent_reply_qfull == true) &&
		(atomic_read(&op_reply_q->pend_ios) >
	     (op_reply_q->qfull_watermark))) {
		atomic_inc(&mrioc->reply_qfull_count);
		retval = -EAGAIN;
		goto out;
	}

	/* Locate the slot for the current PI within the segmented queue */
	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
	req_entry = (u8 *)segment_base_addr +
	    ((pi % op_req_q->segment_qd) * req_sz);

	/* Zero the full frame; the caller supplies only the admin-sized part */
	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);

	/* Advance the producer index with wrap-around */
	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

#ifndef CONFIG_PREEMPT_RT
	/* Enable IRQ polling once outstanding IOs cross the trigger count */
	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
#else
	atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
#endif

	/* Publish the new PI to the controller's doorbell register */
	writel(op_req_q->pi,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);

out:
	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
	return retval;
}
2503
2504 /**
2505 * mpi3mr_check_rh_fault_ioc - check reset history and fault
2506 * controller
2507 * @mrioc: Adapter instance reference
2508 * @reason_code: reason code for the fault.
2509 *
2510 * This routine will save snapdump and fault the controller with
2511 * the given reason code if it is not already in the fault or
 * not asynchronously reset. This will be used to handle
 * initialization time faults/resets/timeouts as in those cases
2514 * immediate soft reset invocation is not required.
2515 *
2516 * Return: None.
2517 */
void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
	u32 ioc_status, host_diagnostic, timeout;
	union mpi3mr_trigger_data trigger_data;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "controller is unrecoverable\n");
		return;
	}

	if (!pci_device_is_present(mrioc->pdev)) {
		mrioc->unrecoverable = 1;
		ioc_err(mrioc, "controller is not present\n");
		return;
	}
	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	/*
	 * If the controller already reset itself or is already faulted,
	 * only record the trigger data - no new fault is issued.
	 */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		return;
	} else if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
		trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
		      MPI3_SYSIF_FAULT_CODE_MASK);

		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		mpi3mr_print_fault_info(mrioc);
		return;
	}

	/* Request diag save, then fault the controller with the reason code */
	mpi3mr_set_diagsave(mrioc);
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    reason_code);
	trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
	    MPI3_SYSIF_FAULT_CODE_MASK);
	mpi3mr_set_trigger_data_in_all_hdb(mrioc, MPI3MR_HDB_TRIGGER_TYPE_FAULT,
	    &trigger_data, 0);
	/* Poll (100 ms steps) until the diag-save-in-progress bit clears */
	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	do {
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
			break;
		msleep(100);
	} while (--timeout);
}
2565
2566 /**
2567 * mpi3mr_sync_timestamp - Issue time stamp sync request
2568 * @mrioc: Adapter reference
2569 *
 * Issue IO unit control MPI request to synchronize firmware
2571 * timestamp with host time.
2572 *
2573 * Return: 0 on success, non-zero on failure.
2574 */
static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
{
	ktime_t current_time;
	struct mpi3_iounit_control_request iou_ctrl;
	int retval = 0;

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
	/* init_cmds is a single shared slot; only one admin init command at a time */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
	/* Wall-clock time in milliseconds is what the firmware expects here */
	current_time = ktime_get_real();
	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
	    sizeof(iou_ctrl), 0);
	if (retval) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
		mrioc->init_cmds.is_waiting = 0;
		/* Skip faulting the controller if a reset already flushed us */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_TSU_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	return retval;
}
2634
2635 /**
2636 * mpi3mr_print_pkg_ver - display controller fw package version
2637 * @mrioc: Adapter reference
2638 *
2639 * Retrieve firmware package version from the component image
2640 * header of the controller flash and display it.
2641 *
2642 * Return: 0 on success and non-zero on failure.
2643 */
static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ci_upload_request ci_upload;
	int retval = -1;
	void *data = NULL;
	dma_addr_t data_dma;
	struct mpi3_ci_manifest_mpi *manifest;
	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA buffer the controller uploads the manifest into */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memset(&ci_upload, 0, sizeof(ci_upload));
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "sending get package version failed due to command in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
	/* Read the manifest that follows the component image header */
	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
	ci_upload.segment_size = cpu_to_le32(data_len);

	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
	    data_dma);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
	    sizeof(ci_upload), 1);
	if (retval) {
		ioc_err(mrioc, "posting get package version failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "get package version timed out\n");
		/* On timeout, save snapdump state and fault the controller */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	/* A failed upload is non-fatal: just skip printing the version */
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    == MPI3_IOCSTATUS_SUCCESS) {
		manifest = (struct mpi3_ci_manifest_mpi *) data;
		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
			ioc_info(mrioc,
			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
			    manifest->package_version.gen_major,
			    manifest->package_version.gen_minor,
			    manifest->package_version.phase_major,
			    manifest->package_version.phase_minor,
			    manifest->package_version.customer_id,
			    manifest->package_version.build_num);
		}
	}
	retval = 0;
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
		    data_dma);
	return retval;
}
2719
2720 /**
2721 * mpi3mr_watchdog_work - watchdog thread to monitor faults
2722 * @work: work struct
2723 *
2724 * Watch dog work periodically executed (1 second interval) to
2725 * monitor firmware fault and to issue periodic timer sync to
2726 * the firmware.
2727 *
2728 * Return: Nothing.
2729 */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 host_diagnostic, ioc_status;
	union mpi3mr_trigger_data trigger_data;
	u16 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;

	/* Don't interfere while a reset or PCI error recovery is running.
	 * Note this returns without re-arming; whoever finishes the reset
	 * is expected to restart the watchdog.
	 */
	if (mrioc->reset_in_progress || mrioc->pci_err_recovery)
		return;

	if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
		ioc_err(mrioc, "watchdog could not detect the controller\n");
		mrioc->unrecoverable = 1;
	}

	if (mrioc->unrecoverable) {
		ioc_err(mrioc,
		    "flush pending commands for unrecoverable controller\n");
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
		return;
	}

	/* Periodic firmware timestamp sync (skipped on non-supervisor IOCs) */
	if (!(mrioc->facts.ioc_capabilities &
	     MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
	    (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {

		mrioc->ts_update_counter = 0;
		mpi3mr_sync_timestamp(mrioc);
	}

	/* Firmware asked us to prepare for reset but never followed through */
	if ((mrioc->prepare_for_reset) &&
	    ((mrioc->prepare_for_reset_timeout_counter++) >=
	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
		return;
	}

	memset(&trigger_data, 0, sizeof(trigger_data));
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	/* Firmware reset itself behind our back: recover via soft reset */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_FW_RELEASED, NULL, 0);
		mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
		return;
	}

	/*Check for fault state every one second and issue Soft reset*/
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state != MRIOC_STATE_FAULT)
		goto schedule_work;

	trigger_data.fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
	host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
	/* Give an in-progress diag save time to finish before resetting */
	if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
		if (!mrioc->diagsave_timeout) {
			mpi3mr_print_fault_info(mrioc);
			ioc_warn(mrioc, "diag save in progress\n");
		}
		if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
			goto schedule_work;
	}

	mpi3mr_print_fault_info(mrioc);
	mrioc->diagsave_timeout = 0;

	if (!mpi3mr_is_fault_recoverable(mrioc)) {
		mrioc->unrecoverable = 1;
		goto schedule_work;
	}

	/* Map the fault code to the recovery action */
	switch (trigger_data.fault) {
	case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
	case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
		ioc_warn(mrioc,
		    "controller requires system power cycle, marking controller as unrecoverable\n");
		mrioc->unrecoverable = 1;
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
		/* A reset is already underway elsewhere; just keep watching */
		goto schedule_work;
	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
		break;
	default:
		break;
	}
	mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
	return;

schedule_work:
	/* Re-arm only if the workqueue still exists (stop may be racing us) */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
	return;
}
2833
2834 /**
2835 * mpi3mr_start_watchdog - Start watchdog
2836 * @mrioc: Adapter instance reference
2837 *
2838 * Create and start the watchdog thread to monitor controller
2839 * faults.
2840 *
2841 * Return: Nothing.
2842 */
mpi3mr_start_watchdog(struct mpi3mr_ioc * mrioc)2843 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2844 {
2845 if (mrioc->watchdog_work_q)
2846 return;
2847
2848 INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2849 snprintf(mrioc->watchdog_work_q_name,
2850 sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2851 mrioc->id);
2852 mrioc->watchdog_work_q = alloc_ordered_workqueue(
2853 "%s", WQ_MEM_RECLAIM, mrioc->watchdog_work_q_name);
2854 if (!mrioc->watchdog_work_q) {
2855 ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2856 return;
2857 }
2858
2859 if (mrioc->watchdog_work_q)
2860 queue_delayed_work(mrioc->watchdog_work_q,
2861 &mrioc->watchdog_work,
2862 msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2863 }
2864
2865 /**
2866 * mpi3mr_stop_watchdog - Stop watchdog
2867 * @mrioc: Adapter instance reference
2868 *
2869 * Stop the watchdog thread created to monitor controller
2870 * faults.
2871 *
2872 * Return: Nothing.
2873 */
mpi3mr_stop_watchdog(struct mpi3mr_ioc * mrioc)2874 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2875 {
2876 unsigned long flags;
2877 struct workqueue_struct *wq;
2878
2879 spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2880 wq = mrioc->watchdog_work_q;
2881 mrioc->watchdog_work_q = NULL;
2882 spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2883 if (wq) {
2884 if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2885 flush_workqueue(wq);
2886 destroy_workqueue(wq);
2887 }
2888 }
2889
2890 /**
2891 * mpi3mr_setup_admin_qpair - Setup admin queue pair
2892 * @mrioc: Adapter instance reference
2893 *
2894 * Allocate memory for admin queue pair if required and register
2895 * the admin queue with the controller.
2896 *
2897 * Return: 0 on success, non-zero on failures.
2898 */
static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 num_admin_entries = 0;

	/* Fixed-size admin request queue; depth derived from frame size. */
	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
	mrioc->num_admin_req = mrioc->admin_req_q_sz /
	    MPI3MR_ADMIN_REQ_FRAME_SZ;
	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;

	/* Fixed-size admin reply queue; expected phase bit starts at 1. */
	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
	mrioc->admin_reply_ci = 0;
	mrioc->admin_reply_ephase = 1;
	atomic_set(&mrioc->admin_reply_q_in_use, 0);

	/*
	 * Allocate the DMA-coherent queue buffers only when not already
	 * present — both queues are allocated (and freed below) as a pair,
	 * so re-registration after a reset reuses the existing buffers.
	 */
	if (!mrioc->admin_req_base) {
		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);

		if (!mrioc->admin_req_base) {
			retval = -1;
			goto out_failed;
		}

		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
		    GFP_KERNEL);

		if (!mrioc->admin_reply_base) {
			retval = -1;
			goto out_failed;
		}
	}

	/* Register queue depths (replies in high 16 bits) and addresses. */
	num_admin_entries = (mrioc->num_admin_replies << 16) |
	    (mrioc->num_admin_req);
	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
	mpi3mr_writeq(mrioc->admin_req_dma,
	    &mrioc->sysif_regs->admin_request_queue_address,
	    &mrioc->adm_req_q_bar_writeq_lock);
	mpi3mr_writeq(mrioc->admin_reply_dma,
	    &mrioc->sysif_regs->admin_reply_queue_address,
	    &mrioc->adm_reply_q_bar_writeq_lock);
	/* Prime producer/consumer indices last, after addresses are set. */
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	return retval;

out_failed:

	/* Release whichever of the paired buffers were allocated. */
	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}
	return retval;
}
2962
2963 /**
2964 * mpi3mr_issue_iocfacts - Send IOC Facts
2965 * @mrioc: Adapter instance reference
2966 * @facts_data: Cached IOC facts data
2967 *
2968 * Issue IOC Facts MPI request through admin queue and wait for
2969 * the completion of it or time out.
2970 *
2971 * Return: 0 on success, non-zero on failures.
2972 */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able bounce buffer the controller writes the facts into. */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	/* init_cmds is a single-slot command tracker; serialize via mutex. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	/* Single simple SGE pointing the firmware at the response buffer. */
	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "ioc_facts timed out\n");
		/* Timeout: record fault state and escalate to reset handling. */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Copy raw facts to the caller's cache, then decode into mrioc->facts. */
	memcpy(facts_data, (u8 *)data, data_len);
	mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}
3045
3046 /**
 * mpi3mr_check_reset_dma_mask - Check and set DMA mask
3048 * @mrioc: Adapter instance reference
3049 *
3050 * Check whether the new DMA mask requested through IOCFacts by
3051 * firmware needs to be set, if so set it .
3052 *
3053 * Return: 0 on success, non-zero on failure.
3054 */
mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc * mrioc)3055 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
3056 {
3057 struct pci_dev *pdev = mrioc->pdev;
3058 int r;
3059 u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
3060
3061 if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
3062 return 0;
3063
3064 ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
3065 mrioc->dma_mask, facts_dma_mask);
3066
3067 r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
3068 if (r) {
3069 ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
3070 facts_dma_mask, r);
3071 return r;
3072 }
3073 mrioc->dma_mask = facts_dma_mask;
3074 return r;
3075 }
3076
3077 /**
3078 * mpi3mr_process_factsdata - Process IOC facts data
3079 * @mrioc: Adapter instance reference
3080 * @facts_data: Cached IOC facts data
3081 *
3082 * Convert IOC facts data into cpu endianness and cache it in
3083 * the driver .
3084 *
3085 * Return: Nothing.
3086 */
mpi3mr_process_factsdata(struct mpi3mr_ioc * mrioc,struct mpi3_ioc_facts_data * facts_data)3087 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
3088 struct mpi3_ioc_facts_data *facts_data)
3089 {
3090 u32 ioc_config, req_sz, facts_flags;
3091
3092 if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
3093 (sizeof(*facts_data) / 4)) {
3094 ioc_warn(mrioc,
3095 "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
3096 sizeof(*facts_data),
3097 le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
3098 }
3099
3100 ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3101 req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
3102 MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
3103 if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
3104 ioc_err(mrioc,
3105 "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
3106 req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
3107 }
3108
3109 memset(&mrioc->facts, 0, sizeof(mrioc->facts));
3110
3111 facts_flags = le32_to_cpu(facts_data->flags);
3112 mrioc->facts.op_req_sz = req_sz;
3113 mrioc->op_reply_desc_sz = 1 << ((ioc_config &
3114 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
3115 MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
3116
3117 mrioc->facts.ioc_num = facts_data->ioc_number;
3118 mrioc->facts.who_init = facts_data->who_init;
3119 mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
3120 mrioc->facts.personality = (facts_flags &
3121 MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
3122 mrioc->facts.dma_mask = (facts_flags &
3123 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3124 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3125 mrioc->facts.dma_mask = (facts_flags &
3126 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
3127 MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
3128 mrioc->facts.protocol_flags = facts_data->protocol_flags;
3129 mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
3130 mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
3131 mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
3132 mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
3133 mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
3134 mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
3135 mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
3136 mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
3137 mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
3138 mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
3139 mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
3140 mrioc->facts.max_pcie_switches =
3141 le16_to_cpu(facts_data->max_pcie_switches);
3142 mrioc->facts.max_sasexpanders =
3143 le16_to_cpu(facts_data->max_sas_expanders);
3144 mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
3145 mrioc->facts.max_sasinitiators =
3146 le16_to_cpu(facts_data->max_sas_initiators);
3147 mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
3148 mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
3149 mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
3150 mrioc->facts.max_op_req_q =
3151 le16_to_cpu(facts_data->max_operational_request_queues);
3152 mrioc->facts.max_op_reply_q =
3153 le16_to_cpu(facts_data->max_operational_reply_queues);
3154 mrioc->facts.ioc_capabilities =
3155 le32_to_cpu(facts_data->ioc_capabilities);
3156 mrioc->facts.fw_ver.build_num =
3157 le16_to_cpu(facts_data->fw_version.build_num);
3158 mrioc->facts.fw_ver.cust_id =
3159 le16_to_cpu(facts_data->fw_version.customer_id);
3160 mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
3161 mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
3162 mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
3163 mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
3164 mrioc->msix_count = min_t(int, mrioc->msix_count,
3165 mrioc->facts.max_msix_vectors);
3166 mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
3167 mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
3168 mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
3169 mrioc->facts.shutdown_timeout =
3170 le16_to_cpu(facts_data->shutdown_timeout);
3171 mrioc->facts.diag_trace_sz =
3172 le32_to_cpu(facts_data->diag_trace_size);
3173 mrioc->facts.diag_fw_sz =
3174 le32_to_cpu(facts_data->diag_fw_size);
3175 mrioc->facts.diag_drvr_sz = le32_to_cpu(facts_data->diag_driver_size);
3176 mrioc->facts.max_dev_per_tg =
3177 facts_data->max_devices_per_throttle_group;
3178 mrioc->facts.io_throttle_data_length =
3179 le16_to_cpu(facts_data->io_throttle_data_length);
3180 mrioc->facts.max_io_throttle_group =
3181 le16_to_cpu(facts_data->max_io_throttle_group);
3182 mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
3183 mrioc->facts.io_throttle_high =
3184 le16_to_cpu(facts_data->io_throttle_high);
3185
3186 if (mrioc->facts.max_data_length ==
3187 MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
3188 mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
3189 else
3190 mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
3191 /* Store in 512b block count */
3192 if (mrioc->facts.io_throttle_data_length)
3193 mrioc->io_throttle_data_length =
3194 (mrioc->facts.io_throttle_data_length * 2 * 4);
3195 else
3196 /* set the length to 1MB + 1K to disable throttle */
3197 mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
3198
3199 mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
3200 mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
3201
3202 ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
3203 mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
3204 mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
3205 ioc_info(mrioc,
3206 "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
3207 mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
3208 mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
3209 ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
3210 mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
3211 mrioc->facts.sge_mod_shift);
3212 ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
3213 mrioc->facts.dma_mask, (facts_flags &
3214 MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
3215 ioc_info(mrioc,
3216 "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
3217 mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
3218 ioc_info(mrioc,
3219 "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
3220 mrioc->facts.io_throttle_data_length * 4,
3221 mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
3222 }
3223
3224 /**
 * mpi3mr_alloc_reply_sense_bufs - Allocate reply and sense buffers
3226 * @mrioc: Adapter instance reference
3227 *
3228 * Allocate and initialize the reply free buffers, sense
3229 * buffers, reply free queue and sense buffer queue.
3230 *
3231 * Return: 0 on success, non-zero on failures.
3232 */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;

	/* init_cmds.reply doubles as the "already allocated" marker. */
	if (mrioc->init_cmds.reply)
		return retval;

	mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->bsg_cmds.reply)
		goto out_failed;

	mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->transport_cmds.reply)
		goto out_failed;

	/* Per-slot reply buffers for the device-removal command trackers. */
	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	/* Per-slot reply buffers for the event-ack command trackers. */
	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
		    GFP_KERNEL);
		if (!mrioc->evtack_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_cmds.reply)
		goto out_failed;

	mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
	if (!mrioc->pel_abort_cmd.reply)
		goto out_failed;

	/* Bitmaps sized by the firmware-reported device handle range. */
	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
	    GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
	    GFP_KERNEL);
	if (!mrioc->evtack_cmds_bitmap)
		goto out_failed;

	/* Queue depths: one extra slot keeps full/empty distinguishable. */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	/* End of the reply buffer region; used to validate reply addresses. */
	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool, 4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

	return retval;

out_failed:
	/*
	 * NOTE(review): partial allocations are not freed here; presumably
	 * they are released by the driver's common teardown path on init
	 * failure — confirm against the caller's error handling.
	 */
	retval = -1;
	return retval;
}
3360
3361 /**
3362 * mpimr_initialize_reply_sbuf_queues - initialize reply sense
3363 * buffers
3364 * @mrioc: Adapter instance reference
3365 *
3366 * Helper function to initialize reply and sense buffers along
3367 * with some debug prints.
3368 *
3369 * Return: None.
3370 */
mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc * mrioc)3371 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
3372 {
3373 u32 sz, i;
3374 dma_addr_t phy_addr;
3375
3376 sz = mrioc->num_reply_bufs * mrioc->reply_sz;
3377 ioc_info(mrioc,
3378 "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3379 mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
3380 (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
3381 sz = mrioc->reply_free_qsz * 8;
3382 ioc_info(mrioc,
3383 "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
3384 mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
3385 (unsigned long long)mrioc->reply_free_q_dma);
3386 sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
3387 ioc_info(mrioc,
3388 "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3389 mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
3390 (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
3391 sz = mrioc->sense_buf_q_sz * 8;
3392 ioc_info(mrioc,
3393 "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
3394 mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
3395 (unsigned long long)mrioc->sense_buf_q_dma);
3396
3397 /* initialize Reply buffer Queue */
3398 for (i = 0, phy_addr = mrioc->reply_buf_dma;
3399 i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
3400 mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
3401 mrioc->reply_free_q[i] = cpu_to_le64(0);
3402
3403 /* initialize Sense Buffer Queue */
3404 for (i = 0, phy_addr = mrioc->sense_buf_dma;
3405 i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
3406 mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
3407 mrioc->sense_buf_q[i] = cpu_to_le64(0);
3408 }
3409
3410 /**
3411 * mpi3mr_issue_iocinit - Send IOC Init
3412 * @mrioc: Adapter instance reference
3413 *
3414 * Issue IOC Init MPI request through admin queue and wait for
3415 * the completion of it or time out.
3416 *
3417 * Return: 0 on success, non-zero on failures.
3418 */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* DMA-able buffer carrying driver identification to the firmware. */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	/* (Re)populate reply free and sense buffer queues before handoff. */
	mpimr_initialize_reply_sbuf_queues(mrioc);

	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* Keep a host-side copy of the information reported to firmware. */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	/* init_cmds is a single-slot command tracker; serialize via mutex. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* Wall-clock timestamp in ms so the firmware can sync its clock. */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	iocinit_req.msg_flags |=
	    MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED;
	iocinit_req.msg_flags |=
	    MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timeout: record fault state and escalate to reset handling. */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		ioc_err(mrioc, "ioc_init timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	/* Hand all reply/sense buffers to the controller via host indices. */
	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);

	mrioc->sbq_host_index = mrioc->num_sense_bufs;
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}
3528
3529 /**
3530 * mpi3mr_unmask_events - Unmask events in event mask bitmap
3531 * @mrioc: Adapter instance reference
3532 * @event: MPI event ID
3533 *
3534 * Un mask the specific event by resetting the event_mask
3535 * bitmap.
3536 *
 * Return: Nothing.
3538 */
static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
{
	u32 event_bit;
	u8 word_idx;

	/* event_masks covers 128 events (4 x 32-bit words); ignore others. */
	if (event >= 128)
		return;

	word_idx = event / 32;
	event_bit = 1U << (event % 32);

	/* Clearing the bit unmasks (enables) delivery of this event. */
	mrioc->event_masks[word_idx] &= ~event_bit;
}
3552
3553 /**
3554 * mpi3mr_issue_event_notification - Send event notification
3555 * @mrioc: Adapter instance reference
3556 *
3557 * Issue event notification MPI request through admin queue and
3558 * wait for the completion of it or time out.
3559 *
3560 * Return: 0 on success, non-zero on failures.
3561 */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	/* init_cmds is a single-slot command tracker; serialize via mutex. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Push the driver's cached event mask words to the firmware. */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "event notification timed out\n");
		/* Timeout: record fault state and escalate to reset handling. */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3616
3617 /**
3618 * mpi3mr_process_event_ack - Process event acknowledgment
3619 * @mrioc: Adapter instance reference
3620 * @event: MPI3 event ID
3621 * @event_ctx: event context
3622 *
3623 * Send event acknowledgment through admin queue and wait for
3624 * it to complete.
3625 *
3626 * Return: 0 on success, non-zero on failures.
3627 */
int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;

	memset(&evtack_req, 0, sizeof(evtack_req));
	/* init_cmds is a single-slot command tracker; serialize via mutex. */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.event = event;
	evtack_req.event_context = cpu_to_le32(event_ctx);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
	    sizeof(evtack_req), 1);
	if (retval) {
		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/*
		 * Fix: this message previously said "Issue EvtNotify" — a
		 * copy-paste from the event-notification path; every other
		 * log line in this function uses the "Send EvtAck" prefix.
		 */
		ioc_err(mrioc, "Send EvtAck: command timed out\n");
		/* Skip fault escalation if a reset already claimed the cmd. */
		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_check_rh_fault_ioc(mrioc,
			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3683
3684 /**
3685 * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3686 * @mrioc: Adapter instance reference
3687 *
3688 * Allocate chain buffers and set a bitmap to indicate free
3689 * chain buffers. Chain buffers are used to pass the SGE
3690 * information along with MPI3 SCSI IO requests for host I/O.
3691 *
3692 * Return: 0 on success, non-zero on failure
3693 */
static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;
	u16 num_chains;

	/* chain_sgl_list doubles as the "already allocated" marker. */
	if (mrioc->chain_sgl_list)
		return retval;

	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;

	/*
	 * DIX protection needs extra chain frames for the protection SGLs;
	 * prot_mask is the module-level protection setting defined elsewhere
	 * in this file.
	 */
	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
	    | SHOST_DIX_TYPE1_PROTECTION
	    | SHOST_DIX_TYPE2_PROTECTION
	    | SHOST_DIX_TYPE3_PROTECTION))
		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);

	mrioc->chain_buf_count = num_chains;
	sz = sizeof(struct chain_element) * num_chains;
	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
	if (!mrioc->chain_sgl_list)
		goto out_failed;

	/* Cap SGL entries so one chain never describes more than max I/O. */
	if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
	    MPI3MR_PAGE_SIZE_4K))
		mrioc->max_sgl_entries = mrioc->facts.max_data_length /
		    MPI3MR_PAGE_SIZE_4K;
	sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
	ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
	    mrioc->max_sgl_entries, sz/1024);

	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->chain_buf_pool) {
		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	/* One DMA chain frame per tracker entry. */
	for (i = 0; i < num_chains; i++) {
		mrioc->chain_sgl_list[i].addr =
		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
		    &mrioc->chain_sgl_list[i].dma_addr);

		if (!mrioc->chain_sgl_list[i].addr)
			goto out_failed;
	}
	/* Bitmap tracks which chain frames are currently in use. */
	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
	if (!mrioc->chain_bitmap)
		goto out_failed;
	return retval;
out_failed:
	/*
	 * NOTE(review): partial allocations are not freed here; presumably
	 * they are released by the driver's common teardown path on init
	 * failure — confirm against the caller's error handling.
	 */
	retval = -1;
	return retval;
}
3748
3749 /**
3750 * mpi3mr_port_enable_complete - Mark port enable complete
3751 * @mrioc: Adapter instance reference
3752 * @drv_cmd: Internal command tracker
3753 *
3754 * Call back for asynchronous port enable request sets the
3755 * driver command to indicate port enable request is complete.
3756 *
3757 * Return: Nothing
3758 */
mpi3mr_port_enable_complete(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * drv_cmd)3759 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3760 struct mpi3mr_drv_cmd *drv_cmd)
3761 {
3762 drv_cmd->callback = NULL;
3763 mrioc->scan_started = 0;
3764 if (drv_cmd->state & MPI3MR_CMD_RESET)
3765 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3766 else
3767 mrioc->scan_failed = drv_cmd->ioc_status;
3768 drv_cmd->state = MPI3MR_CMD_NOTUSED;
3769 }
3770
3771 /**
3772 * mpi3mr_issue_port_enable - Issue Port Enable
3773 * @mrioc: Adapter instance reference
3774 * @async: Flag to wait for completion or not
3775 *
3776 * Issue Port Enable MPI request through admin queue and if the
3777 * async flag is not set wait for the completion of the port
3778 * enable or time out.
3779 *
3780 * Return: 0 on success, non-zero on failures.
3781 */
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
{
	struct mpi3_port_enable_request pe_req;
	int retval = 0;
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;

	memset(&pe_req, 0, sizeof(pe_req));
	/* init_cmds is a shared tracker; serialize against other users */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	if (async) {
		/* Completion is reported later via the callback */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
	} else {
		/* Synchronous: block on the completion below */
		mrioc->init_cmds.is_waiting = 1;
		mrioc->init_cmds.callback = NULL;
		init_completion(&mrioc->init_cmds.done);
	}
	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;

	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
		goto out_unlock;
	}
	if (async) {
		/*
		 * Leave init_cmds pending - the callback (or reset handler)
		 * will mark it NOTUSED when port enable finishes.
		 */
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}

	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "port enable timed out\n");
		retval = -1;
		/* Kick off fault/reset handling for the stuck firmware */
		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
		goto out_unlock;
	}
	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
3833
/* Protocol type to name mapper structure */
static const struct {
	u8 protocol;	/* MPI3_IOCFACTS_PROTOCOL_* flag bit */
	char *name;	/* human-readable name for controller info logging */
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};
3843
/* Capability to name mapper structure */
static const struct {
	u32 capability;	/* MPI3_IOCFACTS_CAPABILITY_* flag bit */
	char *name;	/* human-readable name for controller info logging */
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED, "RAID" },
	{ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED, "MultiPath" },
};
3852
3853 /**
3854 * mpi3mr_repost_diag_bufs - repost host diag buffers
3855 * @mrioc: Adapter instance reference
3856 *
3857 * repost firmware and trace diag buffers based on global
3858 * trigger flag from driver page 2
3859 *
3860 * Return: 0 on success, non-zero on failures.
3861 */
static int mpi3mr_repost_diag_bufs(struct mpi3mr_ioc *mrioc)
{
	u64 global_trigger;
	union mpi3mr_trigger_data prev_trigger_data;
	struct diag_buffer_desc *trace_hdb = NULL;
	struct diag_buffer_desc *fw_hdb = NULL;
	int retval = 0;
	bool trace_repost_needed = false;
	bool fw_repost_needed = false;
	u8 prev_trigger_type;

	/* Refresh trigger data from driver page 2 before deciding */
	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval)
		return -1;

	trace_hdb = mpi3mr_diag_buffer_for_type(mrioc,
	    MPI3_DIAG_BUFFER_TYPE_TRACE);

	/*
	 * Repost only buffers that are allocated and were not captured by a
	 * global or element trigger (those must be preserved for analysis).
	 */
	if (trace_hdb &&
	    trace_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    trace_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		trace_repost_needed = true;

	fw_hdb = mpi3mr_diag_buffer_for_type(mrioc, MPI3_DIAG_BUFFER_TYPE_FW);

	if (fw_hdb && fw_hdb->status != MPI3MR_HDB_BUFSTATUS_NOT_ALLOCATED &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_GLOBAL &&
	    fw_hdb->trigger_type != MPI3MR_HDB_TRIGGER_TYPE_ELEMENT)
		fw_repost_needed = true;

	/* Honor the global "post disabled" bits from driver page 2 */
	if (trace_repost_needed || fw_repost_needed) {
		global_trigger = le64_to_cpu(mrioc->driver_pg2->global_trigger);
		if (global_trigger &
		    MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_TRACE_DISABLED)
			trace_repost_needed = false;
		if (global_trigger &
		    MPI3_DRIVER2_GLOBALTRIGGER_POST_DIAG_FW_DISABLED)
			fw_repost_needed = false;
	}

	if (trace_repost_needed) {
		/* Save trigger state so it can be restored if the post fails */
		prev_trigger_type = trace_hdb->trigger_type;
		memcpy(&prev_trigger_data, &trace_hdb->trigger_data,
		    sizeof(trace_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, trace_hdb);
		if (!retval) {
			dprint_init(mrioc, "trace diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(trace_hdb,
			    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			/* Restore prior trigger state on failure */
			trace_hdb->trigger_type = prev_trigger_type;
			memcpy(&trace_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "trace diag buffer repost failed");
			return -1;
		}
	}

	if (fw_repost_needed) {
		/* Same save/post/restore sequence for the firmware buffer */
		prev_trigger_type = fw_hdb->trigger_type;
		memcpy(&prev_trigger_data, &fw_hdb->trigger_data,
		    sizeof(fw_hdb->trigger_data));
		retval = mpi3mr_issue_diag_buf_post(mrioc, fw_hdb);
		if (!retval) {
			dprint_init(mrioc, "firmware diag buffer reposted");
			mpi3mr_set_trigger_data_in_hdb(fw_hdb,
			    MPI3MR_HDB_TRIGGER_TYPE_UNKNOWN, NULL, 1);
		} else {
			fw_hdb->trigger_type = prev_trigger_type;
			memcpy(&fw_hdb->trigger_data, &prev_trigger_data,
			    sizeof(prev_trigger_data));
			ioc_err(mrioc, "firmware diag buffer repost failed");
			return -1;
		}
	}
	return retval;
}
3940
3941 /**
3942 * mpi3mr_read_tsu_interval - Update time stamp interval
3943 * @mrioc: Adapter instance reference
3944 *
3945 * Update time stamp interval if its defined in driver page 1,
3946 * otherwise use default value.
3947 *
3948 * Return: Nothing
3949 */
3950 static void
mpi3mr_read_tsu_interval(struct mpi3mr_ioc * mrioc)3951 mpi3mr_read_tsu_interval(struct mpi3mr_ioc *mrioc)
3952 {
3953 struct mpi3_driver_page1 driver_pg1;
3954 u16 pg_sz = sizeof(driver_pg1);
3955 int retval = 0;
3956
3957 mrioc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
3958
3959 retval = mpi3mr_cfg_get_driver_pg1(mrioc, &driver_pg1, pg_sz);
3960 if (!retval && driver_pg1.time_stamp_update)
3961 mrioc->ts_update_interval = (driver_pg1.time_stamp_update * 60);
3962 }
3963
3964 /**
3965 * mpi3mr_print_ioc_info - Display controller information
3966 * @mrioc: Adapter instance reference
3967 *
3968 * Display controller personality, capability, supported
3969 * protocols etc.
3970 *
3971 * Return: Nothing
3972 */
3973 static void
mpi3mr_print_ioc_info(struct mpi3mr_ioc * mrioc)3974 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3975 {
3976 int i = 0, bytes_written = 0;
3977 const char *personality;
3978 char protocol[50] = {0};
3979 char capabilities[100] = {0};
3980 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3981
3982 switch (mrioc->facts.personality) {
3983 case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
3984 personality = "Enhanced HBA";
3985 break;
3986 case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
3987 personality = "RAID";
3988 break;
3989 default:
3990 personality = "Unknown";
3991 break;
3992 }
3993
3994 ioc_info(mrioc, "Running in %s Personality", personality);
3995
3996 ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
3997 fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3998 fwver->ph_minor, fwver->cust_id, fwver->build_num);
3999
4000 for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
4001 if (mrioc->facts.protocol_flags &
4002 mpi3mr_protocols[i].protocol) {
4003 bytes_written += scnprintf(protocol + bytes_written,
4004 sizeof(protocol) - bytes_written, "%s%s",
4005 bytes_written ? "," : "",
4006 mpi3mr_protocols[i].name);
4007 }
4008 }
4009
4010 bytes_written = 0;
4011 for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
4012 if (mrioc->facts.protocol_flags &
4013 mpi3mr_capabilities[i].capability) {
4014 bytes_written += scnprintf(capabilities + bytes_written,
4015 sizeof(capabilities) - bytes_written, "%s%s",
4016 bytes_written ? "," : "",
4017 mpi3mr_capabilities[i].name);
4018 }
4019 }
4020
4021 ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
4022 protocol, capabilities);
4023 }
4024
4025 /**
4026 * mpi3mr_cleanup_resources - Free PCI resources
4027 * @mrioc: Adapter instance reference
4028 *
4029 * Unmap PCI device memory and disable PCI device.
4030 *
4031 * Return: 0 on success and non-zero on failure.
4032 */
mpi3mr_cleanup_resources(struct mpi3mr_ioc * mrioc)4033 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
4034 {
4035 struct pci_dev *pdev = mrioc->pdev;
4036
4037 mpi3mr_cleanup_isr(mrioc);
4038
4039 if (mrioc->sysif_regs) {
4040 iounmap((void __iomem *)mrioc->sysif_regs);
4041 mrioc->sysif_regs = NULL;
4042 }
4043
4044 if (pci_is_enabled(pdev)) {
4045 if (mrioc->bars)
4046 pci_release_selected_regions(pdev, mrioc->bars);
4047 pci_disable_device(pdev);
4048 }
4049 }
4050
4051 /**
4052 * mpi3mr_setup_resources - Enable PCI resources
4053 * @mrioc: Adapter instance reference
4054 *
4055 * Enable PCI device memory, MSI-x registers and set DMA mask.
4056 *
4057 * Return: 0 on success and non-zero on failure.
4058 */
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
{
	struct pci_dev *pdev = mrioc->pdev;
	u32 memap_sz = 0;
	int i, retval = 0, capb = 0;
	u16 message_control;
	/* Prefer a previously-negotiated mask; else 64-bit when dma_addr_t allows */
	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
	    ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));

	if (pci_enable_device_mem(pdev)) {
		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* MSI-X is mandatory for this controller */
	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!capb) {
		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
		retval = -ENODEV;
		goto out_failed;
	}
	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);

	if (pci_request_selected_regions(pdev, mrioc->bars,
	    mrioc->driver_name)) {
		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
		retval = -ENODEV;
		goto out_failed;
	}

	/* Map the first memory BAR as the system interface register space */
	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
			memap_sz = pci_resource_len(pdev, i);
			mrioc->sysif_regs =
			    ioremap(mrioc->sysif_regs_phys, memap_sz);
			break;
		}
	}

	pci_set_master(pdev);

	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
	if (retval) {
		/* Fall back to a 32-bit DMA mask before giving up */
		if (dma_mask != DMA_BIT_MASK(32)) {
			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
			dma_mask = DMA_BIT_MASK(32);
			retval = dma_set_mask_and_coherent(&pdev->dev,
			    dma_mask);
		}
		if (retval) {
			mrioc->dma_mask = 0;
			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
			goto out_failed;
		}
	}
	mrioc->dma_mask = dma_mask;

	if (!mrioc->sysif_regs) {
		ioc_err(mrioc,
		    "Unable to map adapter memory or resource not found\n");
		retval = -EINVAL;
		goto out_failed;
	}

	/* Low 10 bits of MSI-X message control hold (table size - 1) */
	pci_read_config_word(pdev, capb + 2, &message_control);
	mrioc->msix_count = (message_control & 0x3FF) + 1;

	pci_save_state(pdev);

	pci_set_drvdata(pdev, mrioc->shost);

	/* Keep interrupts masked until the queues are set up */
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    (unsigned long long)mrioc->sysif_regs_phys,
	    mrioc->sysif_regs, memap_sz);
	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
	    mrioc->msix_count);

	/* Cap poll queues; two vectors stay reserved for admin/default use */
	if (!reset_devices && poll_queues > 0)
		mrioc->requested_poll_qcount = min_t(int, poll_queues,
		    mrioc->msix_count - 2);
	return retval;

out_failed:
	mpi3mr_cleanup_resources(mrioc);
	return retval;
}
4148
4149 /**
4150 * mpi3mr_enable_events - Enable required events
4151 * @mrioc: Adapter instance reference
4152 *
4153 * This routine unmasks the events required by the driver by
* sending the appropriate event mask bitmap through an event
4155 * notification request.
4156 *
4157 * Return: 0 on success and non-zero on failure.
4158 */
mpi3mr_enable_events(struct mpi3mr_ioc * mrioc)4159 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
4160 {
4161 int retval = 0;
4162 u32 i;
4163
4164 for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4165 mrioc->event_masks[i] = -1;
4166
4167 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
4168 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
4169 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
4170 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
4171 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
4172 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4173 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
4174 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
4175 mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
4176 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
4177 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
4178 mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
4179 mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
4180 mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
4181 mpi3mr_unmask_events(mrioc, MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE);
4182
4183 retval = mpi3mr_issue_event_notification(mrioc);
4184 if (retval)
4185 ioc_err(mrioc, "failed to issue event notification %d\n",
4186 retval);
4187 return retval;
4188 }
4189
4190 /**
4191 * mpi3mr_init_ioc - Initialize the controller
4192 * @mrioc: Adapter instance reference
4193 *
* This is the controller initialization routine, executed either
4195 * after soft reset or from pci probe callback.
4196 * Setup the required resources, memory map the controller
4197 * registers, create admin and operational reply queue pairs,
4198 * allocate required memory for reply pool, sense buffer pool,
4199 * issue IOC init request to the firmware, unmask the events and
* issue port enable to discover SAS/SATA/NVMe devices and RAID
4201 * volumes.
4202 *
4203 * Return: 0 on success and non-zero on failure.
4204 */
int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 sz;

retry_init:
	/* Transition the controller to the ready state */
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	/* Use a single MSI-X vector until the queue counts are known */
	retval = mpi3mr_setup_isr(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "Failed to setup ISR error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
		    retval);
		goto out_failed;
	}

	/* Derive host I/O and throttling limits from the reported facts */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
	mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
	mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
	atomic_set(&mrioc->pend_large_data_sz, 0);

	/* Constrain outstanding I/Os in the kdump kernel */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);

	/* SAS transport layer is exposed only when multipath is off */
	if (!(mrioc->facts.ioc_capabilities &
	    MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED)) {
		mrioc->sas_transport_enabled = 1;
		mrioc->scsi_device_channel = 1;
		mrioc->shost->max_channel = 1;
		mrioc->shost->transportt = mpi3mr_transport_template;
	}

	if (mrioc->facts.max_req_limit)
		mrioc->prevent_reply_qfull = true;

	mrioc->reply_sz = mrioc->facts.reply_sz;

	retval = mpi3mr_check_reset_dma_mask(mrioc);
	if (retval) {
		ioc_err(mrioc, "Resetting dma mask failed %d\n",
		    retval);
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	dprint_init(mrioc, "allocating host diag buffers\n");
	mpi3mr_alloc_diag_bufs(mrioc);

	dprint_init(mrioc, "allocating ioctl dma buffers\n");
	mpi3mr_alloc_ioctl_dma_memory(mrioc);

	dprint_init(mrioc, "posting host diag buffers\n");
	retval = mpi3mr_post_diag_bufs(mrioc);

	/* Diag buffer posting failure is non-fatal; continue init */
	if (retval)
		ioc_warn(mrioc, "failed to post host diag buffers\n");

	if (!mrioc->init_cmds.reply) {
		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc,
			    "%s :Failed to allocated reply sense buffers %d\n",
			    __func__, retval);
			goto out_failed_noretry;
		}
	}

	if (!mrioc->chain_sgl_list) {
		retval = mpi3mr_alloc_chain_bufs(mrioc);
		if (retval) {
			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
			    retval);
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Re-setup ISR with the full vector count now that facts are known */
	retval = mpi3mr_setup_isr(mrioc, 0);
	if (retval) {
		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
		    retval);
		goto out_failed_noretry;
	}

	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
		    retval);
		goto out_failed;
	}

	/* Persistent event log sequence-number buffer (DMA-visible) */
	if (!mrioc->pel_seqnum_virt) {
		dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
		dprint_init(mrioc, "allocating memory for throttle groups\n");
		sz = sizeof(struct mpi3mr_throttle_group_info);
		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
		if (!mrioc->throttle_groups) {
			retval = -1;
			goto out_failed_noretry;
		}
	}

	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events %d\n",
		    retval);
		goto out_failed;
	}

	retval = mpi3mr_refresh_trigger(mrioc, MPI3_CONFIG_ACTION_READ_CURRENT);
	if (retval) {
		ioc_err(mrioc, "failed to refresh triggers\n");
		goto out_failed;
	}

	ioc_info(mrioc, "controller initialization completed successfully\n");
	return retval;
out_failed:
	/* Retryable failures: clear reusable buffers and start over */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
		    retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	ioc_err(mrioc, "controller initialization failed\n");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4377
4378 /**
4379 * mpi3mr_reinit_ioc - Re-Initialize the controller
4380 * @mrioc: Adapter instance reference
4381 * @is_resume: Called from resume or reset path
4382 *
* This is the controller re-initialization routine, executed from
4384 * the soft reset handler or resume callback. Creates
4385 * operational reply queue pairs, allocate required memory for
4386 * reply pool, sense buffer pool, issue IOC init request to the
4387 * firmware, unmask the events and issue port enable to discover
4388 * SAS/SATA/NVMe devices and RAID volumes.
4389 *
4390 * Return: 0 on success and non-zero on failure.
4391 */
int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
{
	int retval = 0;
	u8 retry = 0;
	struct mpi3_ioc_facts_data facts_data;
	u32 pe_timeout, ioc_status;

retry_init:
	/* Number of poll iterations allowed while waiting for port enable */
	pe_timeout =
	    (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);

	dprint_reset(mrioc, "bringing up the controller to ready state\n");
	retval = mpi3mr_bring_ioc_ready(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to bring to ready state\n");
		goto out_failed_noretry;
	}

	mrioc->io_admin_reset_sync = 0;
	/* Resume/PCI-error paths lost the vectors; re-arm a single ISR */
	if (is_resume || mrioc->block_on_pci_err) {
		dprint_reset(mrioc, "setting up single ISR\n");
		retval = mpi3mr_setup_isr(mrioc, 1);
		if (retval) {
			ioc_err(mrioc, "failed to setup ISR\n");
			goto out_failed_noretry;
		}
	} else
		mpi3mr_ioc_enable_intr(mrioc);

	dprint_reset(mrioc, "getting ioc_facts\n");
	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
	if (retval) {
		ioc_err(mrioc, "failed to get ioc_facts\n");
		goto out_failed;
	}

	/* Ensure post-reset facts still fit the resources allocated earlier */
	dprint_reset(mrioc, "validating ioc_facts\n");
	retval = mpi3mr_revalidate_factsdata(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
		goto out_failed_noretry;
	}

	mpi3mr_read_tsu_interval(mrioc);
	mpi3mr_print_ioc_info(mrioc);

	if (is_resume) {
		dprint_reset(mrioc, "posting host diag buffers\n");
		retval = mpi3mr_post_diag_bufs(mrioc);
		if (retval)
			ioc_warn(mrioc, "failed to post host diag buffers\n");
	} else {
		/* Reset path: repost only buffers not captured by a trigger */
		retval = mpi3mr_repost_diag_bufs(mrioc);
		if (retval)
			ioc_warn(mrioc, "failed to re post host diag buffers\n");
	}

	dprint_reset(mrioc, "sending ioc_init\n");
	retval = mpi3mr_issue_iocinit(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to send ioc_init\n");
		goto out_failed;
	}

	dprint_reset(mrioc, "getting package version\n");
	retval = mpi3mr_print_pkg_ver(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to get package version\n");
		goto out_failed;
	}

	/* Switch back to the full multi-vector interrupt setup */
	if (is_resume || mrioc->block_on_pci_err) {
		dprint_reset(mrioc, "setting up multiple ISR\n");
		retval = mpi3mr_setup_isr(mrioc, 0);
		if (retval) {
			ioc_err(mrioc, "failed to re-setup ISR\n");
			goto out_failed_noretry;
		}
	}

	dprint_reset(mrioc, "creating operational queue pairs\n");
	retval = mpi3mr_create_op_queues(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to create operational queue pairs\n");
		goto out_failed;
	}

	if (!mrioc->pel_seqnum_virt) {
		dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
		mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
		mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
		    mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
		    GFP_KERNEL);
		if (!mrioc->pel_seqnum_virt) {
			retval = -ENOMEM;
			goto out_failed_noretry;
		}
	}

	/* Cannot run with fewer queues than the block layer already expects */
	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
		ioc_err(mrioc,
		    "cannot create minimum number of operational queues expected:%d created:%d\n",
		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
		retval = -1;
		goto out_failed_noretry;
	}

	dprint_reset(mrioc, "enabling events\n");
	retval = mpi3mr_enable_events(mrioc);
	if (retval) {
		ioc_err(mrioc, "failed to enable events\n");
		goto out_failed;
	}

	mrioc->device_refresh_on = 1;
	mpi3mr_add_event_wait_for_device_refresh(mrioc);

	/* Issue port enable asynchronously, then poll for its completion */
	ioc_info(mrioc, "sending port enable\n");
	retval = mpi3mr_issue_port_enable(mrioc, 1);
	if (retval) {
		ioc_err(mrioc, "failed to issue port enable\n");
		goto out_failed;
	}
	do {
		ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
		/* init_cmds returns to NOTUSED when the callback has run */
		if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
			break;
		if (!pci_device_is_present(mrioc->pdev))
			mrioc->unrecoverable = 1;
		if (mrioc->unrecoverable) {
			retval = -1;
			goto out_failed_noretry;
		}
		/* Abort the wait if the controller faulted or reset again */
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
		    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
			mpi3mr_print_fault_info(mrioc);
			mrioc->init_cmds.is_waiting = 0;
			mrioc->init_cmds.callback = NULL;
			mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
			goto out_failed;
		}
	} while (--pe_timeout);

	if (!pe_timeout) {
		ioc_err(mrioc, "port enable timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_PE_TIMEOUT);
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
		goto out_failed;
	} else if (mrioc->scan_failed) {
		ioc_err(mrioc,
		    "port enable failed with status=0x%04x\n",
		    mrioc->scan_failed);
	} else
		ioc_info(mrioc, "port enable completed successfully\n");

	ioc_info(mrioc, "controller %s completed successfully\n",
	    (is_resume)?"resume":"re-initialization");
	return retval;
out_failed:
	/* Retryable failures: clear reusable buffers and start over */
	if (retry < 2) {
		retry++;
		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
		    (is_resume)?"resume":"re-initialization", retry);
		mpi3mr_memset_buffers(mrioc);
		goto retry_init;
	}
	retval = -1;
out_failed_noretry:
	ioc_err(mrioc, "controller %s is failed\n",
	    (is_resume)?"resume":"re-initialization");
	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
	mrioc->unrecoverable = 1;
	return retval;
}
4571
4572 /**
4573 * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
4574 * segments
4575 * @mrioc: Adapter instance reference
4576 * @qidx: Operational reply queue index
4577 *
4578 * Return: Nothing.
4579 */
static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *reply_q = &mrioc->op_reply_qinfo[qidx];
	struct segments *seg;
	int seg_idx, seg_sz;

	if (!reply_q->q_segments)
		return;

	/* Each segment holds segment_qd reply descriptors. */
	seg_sz = reply_q->segment_qd * mrioc->op_reply_desc_sz;
	seg = reply_q->q_segments;
	for (seg_idx = 0; seg_idx < reply_q->num_segments; seg_idx++)
		memset(seg[seg_idx].segment, 0, seg_sz);
}
4594
4595 /**
4596 * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
4597 * segments
4598 * @mrioc: Adapter instance reference
4599 * @qidx: Operational request queue index
4600 *
4601 * Return: Nothing.
4602 */
static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *req_q = &mrioc->req_qinfo[qidx];
	struct segments *seg;
	int seg_idx, seg_sz;

	if (!req_q->q_segments)
		return;

	/* Each segment holds segment_qd request frames. */
	seg_sz = req_q->segment_qd * mrioc->facts.op_req_sz;
	seg = req_q->q_segments;
	for (seg_idx = 0; seg_idx < req_q->num_segments; seg_idx++)
		memset(seg[seg_idx].segment, 0, seg_sz);
}
4617
4618 /**
4619 * mpi3mr_memset_buffers - memset memory for a controller
4620 * @mrioc: Adapter instance reference
4621 *
4622 * clear all the memory allocated for a controller, typically
4623 * called post reset to reuse the memory allocated during the
4624 * controller init.
4625 *
4626 * Return: Nothing.
4627 */
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_throttle_group_info *tg;

	mrioc->change_count = 0;
	mrioc->active_poll_qcount = 0;
	mrioc->default_qcount = 0;
	/* Clear admin queue memory so it can be reused after the reset */
	if (mrioc->admin_req_base)
		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
	if (mrioc->admin_reply_base)
		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
	atomic_set(&mrioc->admin_reply_q_in_use, 0);

	if (mrioc->init_cmds.reply) {
		/* Clear the reply buffers of every internal driver command */
		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
		memset(mrioc->bsg_cmds.reply, 0,
		    sizeof(*mrioc->bsg_cmds.reply));
		memset(mrioc->host_tm_cmds.reply, 0,
		    sizeof(*mrioc->host_tm_cmds.reply));
		memset(mrioc->pel_cmds.reply, 0,
		    sizeof(*mrioc->pel_cmds.reply));
		memset(mrioc->pel_abort_cmd.reply, 0,
		    sizeof(*mrioc->pel_abort_cmd.reply));
		memset(mrioc->transport_cmds.reply, 0,
		    sizeof(*mrioc->transport_cmds.reply));
		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
			memset(mrioc->evtack_cmds[i].reply, 0,
			    sizeof(*mrioc->evtack_cmds[i].reply));
		/* Drop pending device-removal and event-ack bookkeeping */
		bitmap_clear(mrioc->removepend_bitmap, 0,
		    mrioc->dev_handle_bitmap_bits);
		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
		    MPI3MR_NUM_EVTACKCMD);
	}

	for (i = 0; i < mrioc->num_queues; i++) {
		/* Reset operational reply queue state and segment memory */
		mrioc->op_reply_qinfo[i].qid = 0;
		mrioc->op_reply_qinfo[i].ci = 0;
		mrioc->op_reply_qinfo[i].num_replies = 0;
		mrioc->op_reply_qinfo[i].ephase = 0;
		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
		mpi3mr_memset_op_reply_q_buffers(mrioc, i);

		/* Reset operational request queue state and segment memory */
		mrioc->req_qinfo[i].ci = 0;
		mrioc->req_qinfo[i].pi = 0;
		mrioc->req_qinfo[i].num_requests = 0;
		mrioc->req_qinfo[i].qid = 0;
		mrioc->req_qinfo[i].reply_qid = 0;
		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
		mpi3mr_memset_op_req_q_buffers(mrioc, i);
	}

	atomic_set(&mrioc->pend_large_data_sz, 0);
	if (mrioc->throttle_groups) {
		tg = mrioc->throttle_groups;
		/* Reset per-throttle-group accounting */
		for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
			tg->id = 0;
			tg->fw_qd = 0;
			tg->modified_qd = 0;
			tg->io_divert = 0;
			tg->need_qd_reduction = 0;
			tg->high = 0;
			tg->low = 0;
			tg->qd_reduction = 0;
			atomic_set(&tg->pend_large_data_sz, 0);
		}
	}
}
4701
4702 /**
4703 * mpi3mr_free_mem - Free memory allocated for a controller
4704 * @mrioc: Adapter instance reference
4705 *
4706 * Free all the memory allocated for a controller.
4707 *
4708 * Return: Nothing.
4709 */
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	struct mpi3mr_intr_info *intr_info;
	struct diag_buffer_desc *diag_buffer;

	mpi3mr_free_enclosure_list(mrioc);
	mpi3mr_free_ioctl_dma_memory(mrioc);

	/* Sense buffer pool: free the buffer before destroying its pool */
	if (mrioc->sense_buf_pool) {
		if (mrioc->sense_buf)
			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
			    mrioc->sense_buf_dma);
		dma_pool_destroy(mrioc->sense_buf_pool);
		mrioc->sense_buf = NULL;
		mrioc->sense_buf_pool = NULL;
	}
	if (mrioc->sense_buf_q_pool) {
		if (mrioc->sense_buf_q)
			dma_pool_free(mrioc->sense_buf_q_pool,
			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
		dma_pool_destroy(mrioc->sense_buf_q_pool);
		mrioc->sense_buf_q = NULL;
		mrioc->sense_buf_q_pool = NULL;
	}

	/* Reply buffer and reply free queue pools */
	if (mrioc->reply_buf_pool) {
		if (mrioc->reply_buf)
			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
			    mrioc->reply_buf_dma);
		dma_pool_destroy(mrioc->reply_buf_pool);
		mrioc->reply_buf = NULL;
		mrioc->reply_buf_pool = NULL;
	}
	if (mrioc->reply_free_q_pool) {
		if (mrioc->reply_free_q)
			dma_pool_free(mrioc->reply_free_q_pool,
			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
		dma_pool_destroy(mrioc->reply_free_q_pool);
		mrioc->reply_free_q = NULL;
		mrioc->reply_free_q_pool = NULL;
	}

	/* Segmented operational request/reply queue memory */
	for (i = 0; i < mrioc->num_op_req_q; i++)
		mpi3mr_free_op_req_q_segments(mrioc, i);

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		mpi3mr_free_op_reply_q_segments(mrioc, i);

	/* Detach operational reply queues from the interrupt vectors */
	for (i = 0; i < mrioc->intr_info_count; i++) {
		intr_info = mrioc->intr_info + i;
		intr_info->op_reply_q = NULL;
	}

	kfree(mrioc->req_qinfo);
	mrioc->req_qinfo = NULL;
	mrioc->num_op_req_q = 0;

	kfree(mrioc->op_reply_qinfo);
	mrioc->op_reply_qinfo = NULL;
	mrioc->num_op_reply_q = 0;

	/* Per-tracker reply buffers for internal driver commands */
	kfree(mrioc->init_cmds.reply);
	mrioc->init_cmds.reply = NULL;

	kfree(mrioc->bsg_cmds.reply);
	mrioc->bsg_cmds.reply = NULL;

	kfree(mrioc->host_tm_cmds.reply);
	mrioc->host_tm_cmds.reply = NULL;

	kfree(mrioc->pel_cmds.reply);
	mrioc->pel_cmds.reply = NULL;

	kfree(mrioc->pel_abort_cmd.reply);
	mrioc->pel_abort_cmd.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
		kfree(mrioc->evtack_cmds[i].reply);
		mrioc->evtack_cmds[i].reply = NULL;
	}

	/* Tracking bitmaps used by device removal / event ack handling */
	bitmap_free(mrioc->removepend_bitmap);
	mrioc->removepend_bitmap = NULL;

	bitmap_free(mrioc->devrem_bitmap);
	mrioc->devrem_bitmap = NULL;

	bitmap_free(mrioc->evtack_cmds_bitmap);
	mrioc->evtack_cmds_bitmap = NULL;

	bitmap_free(mrioc->chain_bitmap);
	mrioc->chain_bitmap = NULL;

	kfree(mrioc->transport_cmds.reply);
	mrioc->transport_cmds.reply = NULL;

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		kfree(mrioc->dev_rmhs_cmds[i].reply);
		mrioc->dev_rmhs_cmds[i].reply = NULL;
	}

	/* Chain SGL buffers: free each element before destroying the pool */
	if (mrioc->chain_buf_pool) {
		for (i = 0; i < mrioc->chain_buf_count; i++) {
			if (mrioc->chain_sgl_list[i].addr) {
				dma_pool_free(mrioc->chain_buf_pool,
				    mrioc->chain_sgl_list[i].addr,
				    mrioc->chain_sgl_list[i].dma_addr);
				mrioc->chain_sgl_list[i].addr = NULL;
			}
		}
		dma_pool_destroy(mrioc->chain_buf_pool);
		mrioc->chain_buf_pool = NULL;
	}

	kfree(mrioc->chain_sgl_list);
	mrioc->chain_sgl_list = NULL;

	/* Admin request/reply queue coherent DMA memory */
	if (mrioc->admin_reply_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
		mrioc->admin_reply_base = NULL;
	}
	if (mrioc->admin_req_base) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
		    mrioc->admin_req_base, mrioc->admin_req_dma);
		mrioc->admin_req_base = NULL;
	}

	/* PEL sequence-number DMA buffer */
	if (mrioc->pel_seqnum_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
		mrioc->pel_seqnum_virt = NULL;
	}

	/* Host diag buffers: release memory and clear the descriptors */
	for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
		diag_buffer = &mrioc->diag_buffers[i];
		if (diag_buffer->addr) {
			dma_free_coherent(&mrioc->pdev->dev,
			    diag_buffer->size, diag_buffer->addr,
			    diag_buffer->dma_addr);
			diag_buffer->addr = NULL;
			diag_buffer->size = 0;
			diag_buffer->type = 0;
			diag_buffer->status = 0;
		}
	}

	kfree(mrioc->throttle_groups);
	mrioc->throttle_groups = NULL;

	kfree(mrioc->logdata_buf);
	mrioc->logdata_buf = NULL;

}
4865
4866 /**
4867 * mpi3mr_issue_ioc_shutdown - shutdown controller
4868 * @mrioc: Adapter instance reference
4869 *
 * Send shutdown notification to the controller and wait for the
4871 * shutdown_timeout for it to be completed.
4872 *
4873 * Return: Nothing.
4874 */
static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, ioc_status;
	u8 retval = 1;	/* 1 = shutdown not (yet) observed complete */
	/* Timeout is in seconds; polled below in 100ms steps, hence * 10 */
	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;

	ioc_info(mrioc, "Issuing shutdown Notification\n");
	if (mrioc->unrecoverable) {
		ioc_warn(mrioc,
		    "IOC is unrecoverable shutdown is not issued\n");
		return;
	}
	/* Nothing to do if firmware already reports a shutdown in progress */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
		ioc_info(mrioc, "shutdown already in progress\n");
		return;
	}

	/* Request a normal device shutdown through the IOC config register */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;

	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Prefer the firmware-reported shutdown timeout when available */
	if (mrioc->facts.shutdown_timeout)
		timeout = mrioc->facts.shutdown_timeout * 10;

	/* Poll the status register until shutdown completes or time expires */
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
			retval = 0;
			break;
		}
		msleep(100);
	} while (--timeout);

	/* Re-read final state for the log message below */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	if (retval) {
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
			ioc_warn(mrioc,
			    "shutdown still in progress after timeout\n");
	}

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
}
4928
4929 /**
4930 * mpi3mr_cleanup_ioc - Cleanup controller
4931 * @mrioc: Adapter instance reference
4932 *
4933 * controller cleanup handler, Message unit reset or soft reset
4934 * and shutdown notification is issued to the controller.
4935 *
4936 * Return: Nothing.
4937 */
void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
	enum mpi3mr_iocstate ioc_state;

	dprint_exit(mrioc, "cleaning up the controller\n");
	mpi3mr_ioc_disable_intr(mrioc);

	ioc_state = mpi3mr_get_iocstate(mrioc);

	/*
	 * Only reset/shutdown a controller that is in READY state and not
	 * already unrecoverable, resetting, or in PCI error recovery.
	 * If the message unit reset (MUR) fails, fall back to a soft reset
	 * before issuing the shutdown notification.
	 */
	if (!mrioc->unrecoverable && !mrioc->reset_in_progress &&
	    !mrioc->pci_err_recovery &&
	    (ioc_state == MRIOC_STATE_READY)) {
		if (mpi3mr_issue_and_process_mur(mrioc,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
			mpi3mr_issue_reset(mrioc,
			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
			    MPI3MR_RESET_FROM_MUR_FAILURE);
		mpi3mr_issue_ioc_shutdown(mrioc);
	}
	dprint_exit(mrioc, "controller cleanup completed\n");
}
4959
4960 /**
 * mpi3mr_drv_cmd_comp_reset - Flush an internal driver command
4962 * @mrioc: Adapter instance reference
4963 * @cmdptr: Internal command tracker
4964 *
 * Complete an internal driver command with a state indicating it
 * is completed due to reset.
4967 *
4968 * Return: Nothing.
4969 */
mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc * mrioc,struct mpi3mr_drv_cmd * cmdptr)4970 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
4971 struct mpi3mr_drv_cmd *cmdptr)
4972 {
4973 if (cmdptr->state & MPI3MR_CMD_PENDING) {
4974 cmdptr->state |= MPI3MR_CMD_RESET;
4975 cmdptr->state &= ~MPI3MR_CMD_PENDING;
4976 if (cmdptr->is_waiting) {
4977 complete(&cmdptr->done);
4978 cmdptr->is_waiting = 0;
4979 } else if (cmdptr->callback)
4980 cmdptr->callback(mrioc, cmdptr);
4981 }
4982 }
4983
4984 /**
 * mpi3mr_flush_drv_cmds - Flush internal driver commands
4986 * @mrioc: Adapter instance reference
4987 *
4988 * Flush all internal driver commands post reset
4989 *
4990 * Return: Nothing.
4991 */
mpi3mr_flush_drv_cmds(struct mpi3mr_ioc * mrioc)4992 void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
4993 {
4994 struct mpi3mr_drv_cmd *cmdptr;
4995 u8 i;
4996
4997 cmdptr = &mrioc->init_cmds;
4998 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4999
5000 cmdptr = &mrioc->cfg_cmds;
5001 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5002
5003 cmdptr = &mrioc->bsg_cmds;
5004 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5005 cmdptr = &mrioc->host_tm_cmds;
5006 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5007
5008 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
5009 cmdptr = &mrioc->dev_rmhs_cmds[i];
5010 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5011 }
5012
5013 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
5014 cmdptr = &mrioc->evtack_cmds[i];
5015 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5016 }
5017
5018 cmdptr = &mrioc->pel_cmds;
5019 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5020
5021 cmdptr = &mrioc->pel_abort_cmd;
5022 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5023
5024 cmdptr = &mrioc->transport_cmds;
5025 mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
5026 }
5027
5028 /**
5029 * mpi3mr_pel_wait_post - Issue PEL Wait
5030 * @mrioc: Adapter instance reference
5031 * @drv_cmd: Internal command tracker
5032 *
5033 * Issue PEL Wait MPI request through admin queue and return.
5034 *
5035 * Return: Nothing.
5036 */
static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_req_action_wait pel_wait;

	/* A fresh wait request supersedes any prior abort request */
	mrioc->pel_abort_requested = false;

	memset(&pel_wait, 0, sizeof(pel_wait));
	/* Mark the tracker busy; completion arrives via the callback */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_pel_wait_complete;
	drv_cmd->ioc_status = 0;
	drv_cmd->ioc_loginfo = 0;
	pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
	pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_wait.action = MPI3_PEL_ACTION_WAIT;
	/* Wait for entries starting at the last known newest sequence */
	pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
	pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
	pel_wait.class = cpu_to_le16(mrioc->pel_class);
	/* Firmware completes this only on a new PEL entry or a PELAbort */
	pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
	dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
	    mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);

	if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
		dprint_bsg_err(mrioc,
		    "Issuing PELWait: Admin post failed\n");
		/* Post failed: release the tracker and disable PEL handling */
		drv_cmd->state = MPI3MR_CMD_NOTUSED;
		drv_cmd->callback = NULL;
		drv_cmd->retry_count = 0;
		mrioc->pel_enabled = false;
	}
}
5069
5070 /**
5071 * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
5072 * @mrioc: Adapter instance reference
5073 * @drv_cmd: Internal command tracker
5074 *
5075 * Issue PEL get sequence number MPI request through admin queue
5076 * and return.
5077 *
5078 * Return: 0 on success, non-zero on failure.
5079 */
int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
	int retval = 0;

	memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
	/*
	 * The request is always tracked via mrioc->pel_cmds; the drv_cmd
	 * argument is only used to clean up the caller's tracker if the
	 * post fails. NOTE(review): presumably callers pass
	 * &mrioc->pel_cmds here - confirm against call sites.
	 */
	mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->pel_cmds.is_waiting = 0;
	mrioc->pel_cmds.ioc_status = 0;
	mrioc->pel_cmds.ioc_loginfo = 0;
	mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
	pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
	pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
	/* Firmware writes the sequence numbers into the pel_seqnum buffer */
	mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
	    mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);

	retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
	    sizeof(pel_getseq_req), 0);
	if (retval) {
		/* Post failed: release the tracker and disable PEL handling */
		if (drv_cmd) {
			drv_cmd->state = MPI3MR_CMD_NOTUSED;
			drv_cmd->callback = NULL;
			drv_cmd->retry_count = 0;
		}
		mrioc->pel_enabled = false;
	}

	return retval;
}
5112
5113 /**
5114 * mpi3mr_pel_wait_complete - PELWait Completion callback
5115 * @mrioc: Adapter instance reference
5116 * @drv_cmd: Internal command tracker
5117 *
5118 * This is a callback handler for the PELWait request and
5119 * firmware completes a PELWait request when it is aborted or a
5120 * new PEL entry is available. This sends AEN to the application
5121 * and if the PELwait completion is not due to PELAbort then
5122 * this will send a request for new PEL Sequence number
5123 *
5124 * Return: Nothing.
5125 */
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	u16 ioc_status, pe_log_status;
	bool do_retry = false;

	/* Completion due to a controller reset: just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    __func__, ioc_status, drv_cmd->ioc_loginfo);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;

	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to no reply\n");
		goto out_failed;
	}

	/* ABORTED is an expected completion (PELAbort), not a failure */
	pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
	if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
	    (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
		ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
		    __func__, pe_log_status);
		dprint_bsg_err(mrioc,
		    "pel_wait: failed due to pel_log_status(0x%04x)\n",
		    pe_log_status);
		do_retry = true;
	}

	/* Re-post the wait up to MPI3MR_PEL_RETRY_COUNT times on failure */
	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_wait_post(mrioc, drv_cmd);
			return;
		}
		dprint_bsg_err(mrioc,
		    "pel_wait: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Signal applications that a new PEL entry may be available */
	atomic64_inc(&event_counter);
	/* Unless aborted, fetch the new sequence number and re-arm the wait */
	if (!mrioc->pel_abort_requested) {
		mrioc->pel_cmds.retry_count = 0;
		mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
	}

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}
5193
5194 /**
5195 * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
5196 * @mrioc: Adapter instance reference
5197 * @drv_cmd: Internal command tracker
5198 *
5199 * This is a callback handler for the PEL get sequence number
5200 * request and a new PEL wait request will be issued to the
5201 * firmware from this
5202 *
5203 * Return: Nothing.
5204 */
void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_pel_reply *pel_reply = NULL;
	struct mpi3_pel_seq *pel_seqnum_virt;
	u16 ioc_status;
	bool do_retry = false;

	/* Firmware wrote the sequence numbers into this DMA buffer */
	pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;

	/* Completion due to a controller reset: just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto cleanup_drv_cmd;

	ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
		    ioc_status, drv_cmd->ioc_loginfo);
		do_retry = true;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
	if (!pel_reply) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to no reply\n");
		goto out_failed;
	}

	if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
		    le16_to_cpu(pel_reply->pe_log_status));
		do_retry = true;
	}

	/* Re-post the request up to MPI3MR_PEL_RETRY_COUNT times on failure */
	if (do_retry) {
		if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
			drv_cmd->retry_count++;
			dprint_bsg_err(mrioc,
			    "pel_get_seqnum: retrying(%d)\n",
			    drv_cmd->retry_count);
			mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
			return;
		}

		dprint_bsg_err(mrioc,
		    "pel_get_seqnum: failed after all retries(%d)\n",
		    drv_cmd->retry_count);
		goto out_failed;
	}
	/* Next wait starts one past the newest entry already logged */
	mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
	drv_cmd->retry_count = 0;
	mpi3mr_pel_wait_post(mrioc, drv_cmd);

	return;
out_failed:
	mrioc->pel_enabled = false;
cleanup_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
}
5268
5269 /**
 * mpi3mr_check_op_admin_proc - Wait for reply queue processing to idle
5271 * @mrioc: Adapter instance reference
5272 *
5273 * Check if any of the operation reply queues
5274 * or the admin reply queue are currently in use.
5275 * If any queue is in use, this function waits for
5276 * a maximum of 10 seconds for them to become available.
5277 *
5278 * Return: 0 on success, non-zero on failure.
5279 */
mpi3mr_check_op_admin_proc(struct mpi3mr_ioc * mrioc)5280 static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
5281 {
5282
5283 u16 timeout = 10 * 10;
5284 u16 elapsed_time = 0;
5285 bool op_admin_in_use = false;
5286
5287 do {
5288 op_admin_in_use = false;
5289
5290 /* Check admin_reply queue first to exit early */
5291 if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
5292 op_admin_in_use = true;
5293 else {
5294 /* Check op_reply queues */
5295 int i;
5296
5297 for (i = 0; i < mrioc->num_queues; i++) {
5298 if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
5299 op_admin_in_use = true;
5300 break;
5301 }
5302 }
5303 }
5304
5305 if (!op_admin_in_use)
5306 break;
5307
5308 msleep(100);
5309
5310 } while (++elapsed_time < timeout);
5311
5312 if (op_admin_in_use)
5313 return 1;
5314
5315 return 0;
5316 }
5317
5318 /**
5319 * mpi3mr_soft_reset_handler - Reset the controller
5320 * @mrioc: Adapter instance reference
5321 * @reset_reason: Reset reason code
5322 * @snapdump: Flag to generate snapdump in firmware or not
5323 *
 * This is a handler for recovering the controller by issuing a
 * soft reset or a diag fault reset. This is a blocking function;
 * while one reset is executing, any other requested resets are
 * blocked. All BSG requests will be blocked during the reset. If
 * the controller reset is successful then the controller will be
 * reinitialized, otherwise the controller will be marked as not
 * recoverable
5331 *
 * If the snapdump bit is set, the controller is issued a diag
5333 * fault reset so that the firmware can create a snap dump and
5334 * post that the firmware will result in F000 fault and the
5335 * driver will issue soft reset to recover from that.
5336 *
5337 * Return: 0 on success, non-zero on failure.
5338 */
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
	u16 reset_reason, u8 snapdump)
{
	int retval = 0, i;
	unsigned long flags;
	/* diag-save poll budget: seconds scaled to 100ms steps */
	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
	union mpi3mr_trigger_data trigger_data;

	/* Block the reset handler until diag save in progress*/
	dprint_reset(mrioc,
	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
	    mrioc->diagsave_timeout);
	while (mrioc->diagsave_timeout)
		ssleep(1);
	/*
	 * Block new resets until the currently executing one is finished and
	 * return the status of the existing reset for all blocked resets
	 */
	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
	if (!mutex_trylock(&mrioc->reset_mutex)) {
		ioc_info(mrioc,
		    "controller reset triggered by %s is blocked due to another reset in progress\n",
		    mpi3mr_reset_rc_name(reset_reason));
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		ioc_info(mrioc,
		    "returning previous reset result(%d) for the reset triggered by %s\n",
		    mrioc->prev_reset_result,
		    mpi3mr_reset_rc_name(reset_reason));
		return mrioc->prev_reset_result;
	}
	ioc_info(mrioc, "controller reset is triggered by %s\n",
	    mpi3mr_reset_rc_name(reset_reason));

	/* Mark the reset in progress and block new BSG requests */
	mrioc->device_refresh_on = 0;
	mrioc->reset_in_progress = 1;
	mrioc->stop_bsgs = 1;
	mrioc->prev_reset_result = -1;
	memset(&trigger_data, 0, sizeof(trigger_data));

	/*
	 * For host-initiated (non-snapdump, non-fault-driven) resets,
	 * release the host diag buffers and mask all events first.
	 */
	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
		mpi3mr_set_trigger_data_in_all_hdb(mrioc,
		    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);
		dprint_reset(mrioc,
		    "soft_reset_handler: releasing host diagnostic buffers\n");
		mpi3mr_release_diag_bufs(mrioc, 0);
		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
			mrioc->event_masks[i] = -1;

		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
		mpi3mr_issue_event_notification(mrioc);
	}

	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);

	mpi3mr_ioc_disable_intr(mrioc);
	mrioc->io_admin_reset_sync = 1;

	if (snapdump) {
		/*
		 * Issue a diag fault so firmware captures a snapdump, then
		 * poll until the diag save finishes (or times out) before
		 * proceeding with the soft reset below.
		 */
		mpi3mr_set_diagsave(mrioc);
		retval = mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		if (!retval) {
			trigger_data.fault = (readl(&mrioc->sysif_regs->fault) &
			    MPI3_SYSIF_FAULT_CODE_MASK);
			do {
				host_diagnostic =
				    readl(&mrioc->sysif_regs->host_diagnostic);
				if (!(host_diagnostic &
				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
					break;
				msleep(100);
			} while (--timeout);
			mpi3mr_set_trigger_data_in_all_hdb(mrioc,
			    MPI3MR_HDB_TRIGGER_TYPE_FAULT, &trigger_data, 0);
		}
	}

	retval = mpi3mr_issue_reset(mrioc,
	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
	if (retval) {
		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
		goto out;
	}

	/* Ensure no reply-queue processing is still running post reset */
	retval = mpi3mr_check_op_admin_proc(mrioc);
	if (retval) {
		ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
		    "thread still processing replies even after a 10 second\n"
		    "timeout. Marking the controller as unrecoverable!\n");

		goto out;
	}

	if (mrioc->num_io_throttle_group !=
	    mrioc->facts.max_io_throttle_group) {
		ioc_err(mrioc,
		    "max io throttle group doesn't match old(%d), new(%d)\n",
		    mrioc->num_io_throttle_group,
		    mrioc->facts.max_io_throttle_group);
		retval = -EPERM;
		goto out;
	}

	/* Flush outstanding driver commands, host I/O and driver state */
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
	bitmap_clear(mrioc->removepend_bitmap, 0,
	    mrioc->dev_handle_bitmap_bits);
	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
	mpi3mr_flush_host_io(mrioc);
	mpi3mr_cleanup_fwevt_list(mrioc);
	mpi3mr_invalidate_devhandles(mrioc);
	mpi3mr_free_enclosure_list(mrioc);

	if (mrioc->prepare_for_reset) {
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	mpi3mr_memset_buffers(mrioc);
	mpi3mr_release_diag_bufs(mrioc, 1);
	mrioc->fw_release_trigger_active = false;
	mrioc->trace_release_trigger_active = false;
	mrioc->snapdump_trigger_active = false;
	mpi3mr_set_trigger_data_in_all_hdb(mrioc,
	    MPI3MR_HDB_TRIGGER_TYPE_SOFT_RESET, NULL, 0);

	dprint_reset(mrioc,
	    "soft_reset_handler: reinitializing the controller\n");
	retval = mpi3mr_reinit_ioc(mrioc, 0);
	if (retval) {
		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
		    mrioc->name, reset_reason);
		goto out;
	}
	/* Allow topology changes triggered by the reset to settle */
	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);

out:
	if (!retval) {
		/* Success: clear reset state, re-arm PEL and the watchdog */
		mrioc->diagsave_timeout = 0;
		mrioc->reset_in_progress = 0;
		mrioc->pel_abort_requested = 0;
		if (mrioc->pel_enabled) {
			mrioc->pel_cmds.retry_count = 0;
			mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
		}

		mrioc->device_refresh_on = 0;

		mrioc->ts_update_counter = 0;
		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
		if (mrioc->watchdog_work_q)
			queue_delayed_work(mrioc->watchdog_work_q,
			    &mrioc->watchdog_work,
			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
		mrioc->stop_bsgs = 0;
		if (mrioc->pel_enabled)
			atomic64_inc(&event_counter);
	} else {
		/* Failure: fault the controller and mark it unrecoverable */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
		mrioc->device_refresh_on = 0;
		mrioc->unrecoverable = 1;
		mrioc->reset_in_progress = 0;
		mrioc->stop_bsgs = 0;
		retval = -1;
		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
	}
	/* Blocked resets return this result (see mutex_trylock path above) */
	mrioc->prev_reset_result = retval;
	mutex_unlock(&mrioc->reset_mutex);
	ioc_info(mrioc, "controller reset is %s\n",
	    ((retval == 0) ? "successful" : "failed"));
	return retval;
}
5517
5518 /**
5519 * mpi3mr_post_cfg_req - Issue config requests and wait
5520 * @mrioc: Adapter instance reference
5521 * @cfg_req: Configuration request
5522 * @timeout: Timeout in seconds
5523 * @ioc_status: Pointer to return ioc status
5524 *
5525 * A generic function for posting MPI3 configuration request to
5526 * the firmware. This blocks for the completion of request for
5527 * timeout seconds and if the request times out this function
5528 * faults the controller with proper reason code.
5529 *
5530 * On successful completion of the request this function returns
5531 * appropriate ioc status from the firmware back to the caller.
5532 *
5533 * Return: 0 on success, non-zero on failure.
5534 */
static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
{
	int retval = 0;

	/* Serialize: only one config request can be outstanding at a time */
	mutex_lock(&mrioc->cfg_cmds.mutex);
	if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "sending config request failed due to command in use\n");
		mutex_unlock(&mrioc->cfg_cmds.mutex);
		goto out;
	}
	/* Claim the tracker; is_waiting makes the completion wake us below */
	mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->cfg_cmds.is_waiting = 1;
	mrioc->cfg_cmds.callback = NULL;
	mrioc->cfg_cmds.ioc_status = 0;
	mrioc->cfg_cmds.ioc_loginfo = 0;

	cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
	cfg_req->function = MPI3_FUNCTION_CONFIG;

	init_completion(&mrioc->cfg_cmds.done);
	dprint_cfg_info(mrioc, "posting config request\n");
	if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
		dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
		    "mpi3_cfg_req");
	retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
	if (retval) {
		ioc_err(mrioc, "posting config request failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
	if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* Timed out: fault the controller with a specific reason */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
		ioc_err(mrioc, "config request timed out\n");
		retval = -1;
		goto out_unlock;
	}
	/* Hand the firmware's ioc_status back to the caller */
	*ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
	if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
		dprint_cfg_err(mrioc,
		    "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
		    *ioc_status, mrioc->cfg_cmds.ioc_loginfo);

out_unlock:
	mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->cfg_cmds.mutex);

out:
	return retval;
}
5587
5588 /**
5589 * mpi3mr_process_cfg_req - config page request processor
5590 * @mrioc: Adapter instance reference
5591 * @cfg_req: Configuration request
5592 * @cfg_hdr: Configuration page header
5593 * @timeout: Timeout in seconds
5594 * @ioc_status: Pointer to return ioc status
5595 * @cfg_buf: Memory pointer to copy config page or header
5596 * @cfg_buf_sz: Size of the memory to get config page or header
5597 *
5598 * This is handler for config page read, write and config page
5599 * header read operations.
5600 *
5601 * This function expects the cfg_req to be populated with page
5602 * type, page number, action for the header read and with page
5603 * address for all other operations.
5604 *
5605 * The cfg_hdr can be passed as null for reading required header
5606 * details for read/write pages the cfg_hdr should point valid
5607 * configuration page header.
5608 *
5609 * This allocates dmaable memory based on the size of the config
5610 * buffer and set the SGE of the cfg_req.
5611 *
5612 * For write actions, the config page data has to be passed in
5613 * the cfg_buf and size of the data has to be mentioned in the
5614 * cfg_buf_sz.
5615 *
5616 * For read/header actions, on successful completion of the
5617 * request with successful ioc_status the data will be copied
5618 * into the cfg_buf limited to a minimum of actual page size and
5619 * cfg_buf_sz
5620 *
5621 *
5622 * Return: 0 on success, non-zero on failure.
5623 */
static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
	struct mpi3_config_request *cfg_req,
	struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
	void *cfg_buf, u32 cfg_buf_sz)
{
	struct dma_memory_desc mem_desc;
	int retval = -1;
	u8 invalid_action = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	memset(&mem_desc, 0, sizeof(struct dma_memory_desc));

	/*
	 * Header reads need only a header-sized buffer; all other actions
	 * require a valid cfg_hdr to size the buffer and to validate the
	 * requested action against the page's attributes.
	 */
	if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
		mem_desc.size = sizeof(struct mpi3_config_page_header);
	else {
		if (!cfg_hdr) {
			ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number);
			goto out;
		}
		switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
		case MPI3_CONFIG_PAGEATTR_READ_ONLY:
			/* Read-only pages accept only READ_CURRENT */
			if (cfg_req->action
			    != MPI3_CONFIG_ACTION_READ_CURRENT)
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
			/* Changeable pages have no persistent copies */
			if ((cfg_req->action ==
			     MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
			    (cfg_req->action ==
			     MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
				invalid_action = 1;
			break;
		case MPI3_CONFIG_PAGEATTR_PERSISTENT:
		default:
			break;
		}
		if (invalid_action) {
			ioc_err(mrioc,
			    "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
			    cfg_req->action, cfg_req->page_type,
			    cfg_req->page_number, cfg_hdr->page_attribute);
			goto out;
		}
		/* page_length is in 4-byte units */
		mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
		cfg_req->page_length = cfg_hdr->page_length;
		cfg_req->page_version = cfg_hdr->page_version;
	}

	mem_desc.addr = dma_alloc_coherent(&mrioc->pdev->dev,
	    mem_desc.size, &mem_desc.dma_addr, GFP_KERNEL);

	/* Early return is safe here: nothing has been allocated yet */
	if (!mem_desc.addr)
		return retval;

	mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
	    mem_desc.dma_addr);

	if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
	    (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		/*
		 * NOTE(review): min_t(u16, ...) truncates sizes above 64K;
		 * page_length * 4 can theoretically reach 256K - confirm
		 * config pages never exceed 64K.
		 */
		memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer to be written\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

	if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
		goto out;

	retval = 0;
	/* For read/header actions, copy the page back bounded by cfg_buf_sz */
	if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
	    (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
		memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
		    cfg_buf_sz));
		dprint_cfg_info(mrioc, "config buffer read\n");
		if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
			dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
	}

out:
	/* mem_desc.addr is NULL on the pre-allocation error paths */
	if (mem_desc.addr) {
		dma_free_coherent(&mrioc->pdev->dev, mem_desc.size,
		    mem_desc.addr, mem_desc.dma_addr);
		mem_desc.addr = NULL;
	}

	return retval;
}
5715
5716 /**
5717 * mpi3mr_cfg_get_dev_pg0 - Read current device page0
5718 * @mrioc: Adapter instance reference
5719 * @ioc_status: Pointer to return ioc status
5720 * @dev_pg0: Pointer to return device page 0
5721 * @pg_sz: Size of the memory allocated to the page pointer
5722 * @form: The form to be used for addressing the page
5723 * @form_spec: Form specific information like device handle
5724 *
5725 * This is handler for config page read for a specific device
5726 * page0. The ioc_status has the controller returned ioc_status.
5727 * This routine doesn't check ioc_status to decide whether the
5728 * page read is success or not and it is the callers
5729 * responsibility.
5730 *
5731 * Return: 0 on success, non-zero on failure.
5732 */
int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;

	memset(dev_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr,
	    sizeof(cfg_hdr))) {
		ioc_err(mrioc, "device page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page addressed by form/form_spec */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address =
	    cpu_to_le32((form & MPI3_DEVICE_PGAD_FORM_MASK) |
			(form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
		ioc_err(mrioc, "device page0 read failed\n");
		return -1;
	}
	return 0;
}
5773
5774
5775 /**
5776 * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
5777 * @mrioc: Adapter instance reference
5778 * @ioc_status: Pointer to return ioc status
5779 * @phy_pg0: Pointer to return SAS Phy page 0
5780 * @pg_sz: Size of the memory allocated to the page pointer
5781 * @form: The form to be used for addressing the page
5782 * @form_spec: Form specific information like phy number
5783 *
5784 * This is handler for config page read for a specific SAS Phy
5785 * page0. The ioc_status has the controller returned ioc_status.
5786 * This routine doesn't check ioc_status to decide whether the
5787 * page read is success or not and it is the callers
5788 * responsibility.
5789 *
5790 * Return: 0 on success, non-zero on failure.
5791 */
int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;

	memset(phy_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr,
	    sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas phy page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page addressed by form/phy number */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address =
	    cpu_to_le32((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
			(form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
		ioc_err(mrioc, "sas phy page0 read failed\n");
		return -1;
	}
	return 0;
}
5833
5834 /**
5835 * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
5836 * @mrioc: Adapter instance reference
5837 * @ioc_status: Pointer to return ioc status
5838 * @phy_pg1: Pointer to return SAS Phy page 1
5839 * @pg_sz: Size of the memory allocated to the page pointer
5840 * @form: The form to be used for addressing the page
5841 * @form_spec: Form specific information like phy number
5842 *
5843 * This is handler for config page read for a specific SAS Phy
5844 * page1. The ioc_status has the controller returned ioc_status.
5845 * This routine doesn't check ioc_status to decide whether the
5846 * page read is success or not and it is the callers
5847 * responsibility.
5848 *
5849 * Return: 0 on success, non-zero on failure.
5850 */
int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;

	memset(phy_pg1, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr,
	    sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas phy page1 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page addressed by form/phy number */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address =
	    cpu_to_le32((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
			(form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
		ioc_err(mrioc, "sas phy page1 read failed\n");
		return -1;
	}
	return 0;
}
5892
5893
5894 /**
5895 * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
5896 * @mrioc: Adapter instance reference
5897 * @ioc_status: Pointer to return ioc status
5898 * @exp_pg0: Pointer to return SAS Expander page 0
5899 * @pg_sz: Size of the memory allocated to the page pointer
5900 * @form: The form to be used for addressing the page
5901 * @form_spec: Form specific information like device handle
5902 *
5903 * This is handler for config page read for a specific SAS
5904 * Expander page0. The ioc_status has the controller returned
5905 * ioc_status. This routine doesn't check ioc_status to decide
5906 * whether the page read is success or not and it is the callers
5907 * responsibility.
5908 *
5909 * Return: 0 on success, non-zero on failure.
5910 */
int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;

	memset(exp_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr,
	    sizeof(cfg_hdr))) {
		ioc_err(mrioc, "expander page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page addressed by form/handle/phy */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address =
	    cpu_to_le32((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
			(form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
				      MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
		ioc_err(mrioc, "expander page0 read failed\n");
		return -1;
	}
	return 0;
}
5953
5954 /**
5955 * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
5956 * @mrioc: Adapter instance reference
5957 * @ioc_status: Pointer to return ioc status
5958 * @exp_pg1: Pointer to return SAS Expander page 1
5959 * @pg_sz: Size of the memory allocated to the page pointer
5960 * @form: The form to be used for addressing the page
5961 * @form_spec: Form specific information like phy number
5962 *
5963 * This is handler for config page read for a specific SAS
5964 * Expander page1. The ioc_status has the controller returned
5965 * ioc_status. This routine doesn't check ioc_status to decide
5966 * whether the page read is success or not and it is the callers
5967 * responsibility.
5968 *
5969 * Return: 0 on success, non-zero on failure.
5970 */
int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;

	memset(exp_pg1, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr,
	    sizeof(cfg_hdr))) {
		ioc_err(mrioc, "expander page1 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page addressed by form/handle/phy */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address =
	    cpu_to_le32((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
			(form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
				      MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
		ioc_err(mrioc, "expander page1 read failed\n");
		return -1;
	}
	return 0;
}
6013
6014 /**
6015 * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
6016 * @mrioc: Adapter instance reference
6017 * @ioc_status: Pointer to return ioc status
6018 * @encl_pg0: Pointer to return Enclosure page 0
6019 * @pg_sz: Size of the memory allocated to the page pointer
6020 * @form: The form to be used for addressing the page
6021 * @form_spec: Form specific information like device handle
6022 *
6023 * This is handler for config page read for a specific Enclosure
6024 * page0. The ioc_status has the controller returned ioc_status.
6025 * This routine doesn't check ioc_status to decide whether the
6026 * page read is success or not and it is the callers
6027 * responsibility.
6028 *
6029 * Return: 0 on success, non-zero on failure.
6030 */
int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
	struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
	u32 form_spec)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;

	memset(encl_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr,
	    sizeof(cfg_hdr))) {
		ioc_err(mrioc, "enclosure page0 header read failed\n");
		return -1;
	}
	if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
		    *ioc_status);
		return -1;
	}

	/* Step 2: read the current page addressed by form/handle */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
	cfg_req.page_address =
	    cpu_to_le32((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
			(form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
		ioc_err(mrioc, "enclosure page0 read failed\n");
		return -1;
	}
	return 0;
}
6072
6073
6074 /**
6075 * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
6076 * @mrioc: Adapter instance reference
6077 * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
6078 * @pg_sz: Size of the memory allocated to the page pointer
6079 *
6080 * This is handler for config page read for the SAS IO Unit
6081 * page0. This routine checks ioc_status to decide whether the
6082 * page read is success or not.
6083 *
6084 * Return: 0 on success, non-zero on failure.
6085 */
int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
	struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u16 ioc_status = 0;

	memset(sas_io_unit_pg0, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
	cfg_req.page_number = 0;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr,
	    sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas io unit page0 header read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 2: read the current page; this page needs no page address */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0,
	    pg_sz)) {
		ioc_err(mrioc, "sas io unit page0 read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}
	return 0;
}
6129
6130 /**
6131 * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
6132 * @mrioc: Adapter instance reference
6133 * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
6134 * @pg_sz: Size of the memory allocated to the page pointer
6135 *
6136 * This is handler for config page read for the SAS IO Unit
6137 * page1. This routine checks ioc_status to decide whether the
6138 * page read is success or not.
6139 *
6140 * Return: 0 on success, non-zero on failure.
6141 */
int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u16 ioc_status = 0;

	memset(sas_io_unit_pg1, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr,
	    sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas io unit page1 header read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 2: read the current page; this page needs no page address */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1,
	    pg_sz)) {
		ioc_err(mrioc, "sas io unit page1 read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}
	return 0;
}
6185
6186 /**
6187 * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
6188 * @mrioc: Adapter instance reference
6189 * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
6190 * @pg_sz: Size of the memory allocated to the page pointer
6191 *
6192 * This is handler for config page write for the SAS IO Unit
6193 * page1. This routine checks ioc_status to decide whether the
6194 * page read is success or not. This will modify both current
6195 * and persistent page.
6196 *
6197 * Return: 0 on success, non-zero on failure.
6198 */
int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
	struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u16 ioc_status = 0;

	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr,
	    sizeof(cfg_hdr))) {
		ioc_err(mrioc, "sas io unit page1 header read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 2: write the current copy of the page */
	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1,
	    pg_sz)) {
		ioc_err(mrioc, "sas io unit page1 write current failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 3: also write the persistent copy so it survives reset */
	cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1,
	    pg_sz)) {
		ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}
	return 0;
}
6254
6255 /**
6256 * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
6257 * @mrioc: Adapter instance reference
6258 * @driver_pg1: Pointer to return Driver page 1
6259 * @pg_sz: Size of the memory allocated to the page pointer
6260 *
6261 * This is handler for config page read for the Driver page1.
6262 * This routine checks ioc_status to decide whether the page
6263 * read is success or not.
6264 *
6265 * Return: 0 on success, non-zero on failure.
6266 */
int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
	struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
{
	struct mpi3_config_request cfg_req;
	struct mpi3_config_page_header cfg_hdr;
	u16 ioc_status = 0;

	memset(driver_pg1, 0, pg_sz);
	memset(&cfg_req, 0, sizeof(cfg_req));
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
	cfg_req.page_number = 1;
	cfg_req.page_address = 0;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr,
	    sizeof(cfg_hdr))) {
		ioc_err(mrioc, "driver page1 header read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}

	/* Step 2: read the current page; this page needs no page address */
	cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
		ioc_err(mrioc, "driver page1 read failed\n");
		return -1;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		return -1;
	}
	return 0;
}
6310
6311 /**
6312 * mpi3mr_cfg_get_driver_pg2 - Read current driver page2
6313 * @mrioc: Adapter instance reference
6314 * @driver_pg2: Pointer to return driver page 2
6315 * @pg_sz: Size of the memory allocated to the page pointer
6316 * @page_action: Page action
6317 *
6318 * This is handler for config page read for the driver page2.
6319 * This routine checks ioc_status to decide whether the page
6320 * read is success or not.
6321 *
6322 * Return: 0 on success, non-zero on failure.
6323 */
int mpi3mr_cfg_get_driver_pg2(struct mpi3mr_ioc *mrioc,
	struct mpi3_driver_page2 *driver_pg2, u16 pg_sz, u8 page_action)
{
	struct mpi3_config_page_header cfg_hdr;
	struct mpi3_config_request cfg_req;
	u16 ioc_status = 0;

	memset(driver_pg2, 0, pg_sz);
	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(&cfg_req, 0, sizeof(cfg_req));

	/* Step 1: fetch the page header to learn page length/version */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
	cfg_req.page_number = 2;
	cfg_req.page_address = 0;
	cfg_req.page_version = MPI3_DRIVER2_PAGEVERSION;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
		ioc_err(mrioc, "driver page2 header read failed\n");
		goto out_failed;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/*
		 * Single-line message: the previous split-string form
		 * embedded a '\n' mid-message and produced a broken
		 * two-line log entry, unlike the sibling helpers.
		 */
		ioc_err(mrioc, "driver page2 header read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		goto out_failed;
	}
	/* Step 2: read the page using the caller-specified read action */
	cfg_req.action = page_action;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg2, pg_sz)) {
		ioc_err(mrioc, "driver page2 read failed\n");
		goto out_failed;
	}
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc, "driver page2 read failed with ioc_status(0x%04x)\n",
		    ioc_status);
		goto out_failed;
	}
	return 0;
out_failed:
	return -1;
}
6370
6371