1 /*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88
89 /*
90 * Global Data
91 */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
102 static DEFINE_SPINLOCK(ipr_driver_lock);
103
104 /* This table describes the differences between DMA controller chips */
105 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
106 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
107 .mailbox = 0x0042C,
108 .max_cmds = 100,
109 .cache_line_size = 0x20,
110 .clear_isr = 1,
111 .iopoll_weight = 0,
112 {
113 .set_interrupt_mask_reg = 0x0022C,
114 .clr_interrupt_mask_reg = 0x00230,
115 .clr_interrupt_mask_reg32 = 0x00230,
116 .sense_interrupt_mask_reg = 0x0022C,
117 .sense_interrupt_mask_reg32 = 0x0022C,
118 .clr_interrupt_reg = 0x00228,
119 .clr_interrupt_reg32 = 0x00228,
120 .sense_interrupt_reg = 0x00224,
121 .sense_interrupt_reg32 = 0x00224,
122 .ioarrin_reg = 0x00404,
123 .sense_uproc_interrupt_reg = 0x00214,
124 .sense_uproc_interrupt_reg32 = 0x00214,
125 .set_uproc_interrupt_reg = 0x00214,
126 .set_uproc_interrupt_reg32 = 0x00214,
127 .clr_uproc_interrupt_reg = 0x00218,
128 .clr_uproc_interrupt_reg32 = 0x00218
129 }
130 },
131 { /* Snipe and Scamp */
132 .mailbox = 0x0052C,
133 .max_cmds = 100,
134 .cache_line_size = 0x20,
135 .clear_isr = 1,
136 .iopoll_weight = 0,
137 {
138 .set_interrupt_mask_reg = 0x00288,
139 .clr_interrupt_mask_reg = 0x0028C,
140 .clr_interrupt_mask_reg32 = 0x0028C,
141 .sense_interrupt_mask_reg = 0x00288,
142 .sense_interrupt_mask_reg32 = 0x00288,
143 .clr_interrupt_reg = 0x00284,
144 .clr_interrupt_reg32 = 0x00284,
145 .sense_interrupt_reg = 0x00280,
146 .sense_interrupt_reg32 = 0x00280,
147 .ioarrin_reg = 0x00504,
148 .sense_uproc_interrupt_reg = 0x00290,
149 .sense_uproc_interrupt_reg32 = 0x00290,
150 .set_uproc_interrupt_reg = 0x00290,
151 .set_uproc_interrupt_reg32 = 0x00290,
152 .clr_uproc_interrupt_reg = 0x00294,
153 .clr_uproc_interrupt_reg32 = 0x00294
154 }
155 },
156 { /* CRoC */
157 .mailbox = 0x00044,
158 .max_cmds = 1000,
159 .cache_line_size = 0x20,
160 .clear_isr = 0,
161 .iopoll_weight = 64,
162 {
163 .set_interrupt_mask_reg = 0x00010,
164 .clr_interrupt_mask_reg = 0x00018,
165 .clr_interrupt_mask_reg32 = 0x0001C,
166 .sense_interrupt_mask_reg = 0x00010,
167 .sense_interrupt_mask_reg32 = 0x00014,
168 .clr_interrupt_reg = 0x00008,
169 .clr_interrupt_reg32 = 0x0000C,
170 .sense_interrupt_reg = 0x00000,
171 .sense_interrupt_reg32 = 0x00004,
172 .ioarrin_reg = 0x00070,
173 .sense_uproc_interrupt_reg = 0x00020,
174 .sense_uproc_interrupt_reg32 = 0x00024,
175 .set_uproc_interrupt_reg = 0x00020,
176 .set_uproc_interrupt_reg32 = 0x00024,
177 .clr_uproc_interrupt_reg = 0x00028,
178 .clr_uproc_interrupt_reg32 = 0x0002C,
179 .init_feedback_reg = 0x0005C,
180 .dump_addr_reg = 0x00064,
181 .dump_data_reg = 0x00068,
182 .endian_swap_reg = 0x00084
183 }
184 },
185 };
186
187 static const struct ipr_chip_t ipr_chip[] = {
188 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
189 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
194 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
197 };
198
199 static int ipr_max_bus_speeds[] = {
200 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
201 };
202
203 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
204 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
205 module_param_named(max_speed, ipr_max_speed, uint, 0);
206 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
207 module_param_named(log_level, ipr_log_level, uint, 0);
208 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
209 module_param_named(testmode, ipr_testmode, int, 0);
210 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
211 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
213 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
214 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
215 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
216 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
217 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
218 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
219 module_param_named(max_devs, ipr_max_devs, int, 0);
220 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
221 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
222 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
223 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(IPR_DRIVER_VERSION);
226
227 /* A constant array of IOASCs/URCs/Error Messages */
228 static const
229 struct ipr_error_table_t ipr_error_table[] = {
230 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
231 "8155: An unknown error was received"},
232 {0x00330000, 0, 0,
233 "Soft underlength error"},
234 {0x005A0000, 0, 0,
235 "Command to be cancelled not found"},
236 {0x00808000, 0, 0,
237 "Qualified success"},
238 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
239 "FFFE: Soft device bus error recovered by the IOA"},
240 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
241 "4101: Soft device bus fabric error"},
242 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
243 "FFFC: Logical block guard error recovered by the device"},
244 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
245 "FFFC: Logical block reference tag error recovered by the device"},
246 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
247 "4171: Recovered scatter list tag / sequence number error"},
248 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
249 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
250 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
251 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
252 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
253 "FFFD: Recovered logical block reference tag error detected by the IOA"},
254 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
255 "FFFD: Logical block guard error recovered by the IOA"},
256 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
257 "FFF9: Device sector reassign successful"},
258 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
259 "FFF7: Media error recovered by device rewrite procedures"},
260 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
261 "7001: IOA sector reassignment successful"},
262 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
263 "FFF9: Soft media error. Sector reassignment recommended"},
264 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
265 "FFF7: Media error recovered by IOA rewrite procedures"},
266 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
267 "FF3D: Soft PCI bus error recovered by the IOA"},
268 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
269 "FFF6: Device hardware error recovered by the IOA"},
270 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
271 "FFF6: Device hardware error recovered by the device"},
272 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
273 "FF3D: Soft IOA error recovered by the IOA"},
274 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
275 "FFFA: Undefined device response recovered by the IOA"},
276 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
277 "FFF6: Device bus error, message or command phase"},
278 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
279 "FFFE: Task Management Function failed"},
280 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
281 "FFF6: Failure prediction threshold exceeded"},
282 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
283 "8009: Impending cache battery pack failure"},
284 {0x02040100, 0, 0,
285 "Logical Unit in process of becoming ready"},
286 {0x02040200, 0, 0,
287 "Initializing command required"},
288 {0x02040400, 0, 0,
289 "34FF: Disk device format in progress"},
290 {0x02040C00, 0, 0,
291 "Logical unit not accessible, target port in unavailable state"},
292 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
293 "9070: IOA requested reset"},
294 {0x023F0000, 0, 0,
295 "Synchronization required"},
296 {0x02408500, 0, 0,
297 "IOA microcode download required"},
298 {0x02408600, 0, 0,
299 "Device bus connection is prohibited by host"},
300 {0x024E0000, 0, 0,
301 	"Not ready, IOA shutdown"},
302 {0x025A0000, 0, 0,
303 "Not ready, IOA has been shutdown"},
304 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
305 "3020: Storage subsystem configuration error"},
306 {0x03110B00, 0, 0,
307 "FFF5: Medium error, data unreadable, recommend reassign"},
308 {0x03110C00, 0, 0,
309 "7000: Medium error, data unreadable, do not reassign"},
310 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
311 "FFF3: Disk media format bad"},
312 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
313 "3002: Addressed device failed to respond to selection"},
314 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
315 "3100: Device bus error"},
316 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
317 "3109: IOA timed out a device command"},
318 {0x04088000, 0, 0,
319 "3120: SCSI bus is not operational"},
320 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
321 "4100: Hard device bus fabric error"},
322 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
323 "310C: Logical block guard error detected by the device"},
324 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
325 "310C: Logical block reference tag error detected by the device"},
326 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
327 "4170: Scatter list tag / sequence number error"},
328 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
329 "8150: Logical block CRC error on IOA to Host transfer"},
330 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
331 "4170: Logical block sequence number error on IOA to Host transfer"},
332 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
333 "310D: Logical block reference tag error detected by the IOA"},
334 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
335 "310D: Logical block guard error detected by the IOA"},
336 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
337 "9000: IOA reserved area data check"},
338 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
339 "9001: IOA reserved area invalid data pattern"},
340 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
341 "9002: IOA reserved area LRC error"},
342 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
343 "Hardware Error, IOA metadata access error"},
344 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
345 "102E: Out of alternate sectors for disk storage"},
346 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
347 "FFF4: Data transfer underlength error"},
348 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
349 "FFF4: Data transfer overlength error"},
350 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
351 "3400: Logical unit failure"},
352 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
353 "FFF4: Device microcode is corrupt"},
354 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
355 "8150: PCI bus error"},
356 {0x04430000, 1, 0,
357 "Unsupported device bus message received"},
358 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
359 "FFF4: Disk device problem"},
360 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
361 "8150: Permanent IOA failure"},
362 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
363 "3010: Disk device returned wrong response to IOA"},
364 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
365 "8151: IOA microcode error"},
366 {0x04448500, 0, 0,
367 "Device bus status error"},
368 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
369 "8157: IOA error requiring IOA reset to recover"},
370 {0x04448700, 0, 0,
371 "ATA device status error"},
372 {0x04490000, 0, 0,
373 "Message reject received from the device"},
374 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
375 "8008: A permanent cache battery pack failure occurred"},
376 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
377 "9090: Disk unit has been modified after the last known status"},
378 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
379 "9081: IOA detected device error"},
380 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
381 "9082: IOA detected device error"},
382 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
383 "3110: Device bus error, message or command phase"},
384 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
385 "3110: SAS Command / Task Management Function failed"},
386 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
387 "9091: Incorrect hardware configuration change has been detected"},
388 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
389 "9073: Invalid multi-adapter configuration"},
390 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
391 "4010: Incorrect connection between cascaded expanders"},
392 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
393 "4020: Connections exceed IOA design limits"},
394 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
395 "4030: Incorrect multipath connection"},
396 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
397 "4110: Unsupported enclosure function"},
398 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
399 "4120: SAS cable VPD cannot be read"},
400 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
401 "FFF4: Command to logical unit failed"},
402 {0x05240000, 1, 0,
403 "Illegal request, invalid request type or request packet"},
404 {0x05250000, 0, 0,
405 "Illegal request, invalid resource handle"},
406 {0x05258000, 0, 0,
407 "Illegal request, commands not allowed to this device"},
408 {0x05258100, 0, 0,
409 "Illegal request, command not allowed to a secondary adapter"},
410 {0x05258200, 0, 0,
411 "Illegal request, command not allowed to a non-optimized resource"},
412 {0x05260000, 0, 0,
413 "Illegal request, invalid field in parameter list"},
414 {0x05260100, 0, 0,
415 "Illegal request, parameter not supported"},
416 {0x05260200, 0, 0,
417 "Illegal request, parameter value invalid"},
418 {0x052C0000, 0, 0,
419 "Illegal request, command sequence error"},
420 {0x052C8000, 1, 0,
421 "Illegal request, dual adapter support not enabled"},
422 {0x052C8100, 1, 0,
423 "Illegal request, another cable connector was physically disabled"},
424 {0x054E8000, 1, 0,
425 "Illegal request, inconsistent group id/group count"},
426 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
427 "9031: Array protection temporarily suspended, protection resuming"},
428 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
429 "9040: Array protection temporarily suspended, protection resuming"},
430 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
431 "4080: IOA exceeded maximum operating temperature"},
432 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
433 "4085: Service required"},
434 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
435 "3140: Device bus not ready to ready transition"},
436 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
437 "FFFB: SCSI bus was reset"},
438 {0x06290500, 0, 0,
439 "FFFE: SCSI bus transition to single ended"},
440 {0x06290600, 0, 0,
441 "FFFE: SCSI bus transition to LVD"},
442 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
443 "FFFB: SCSI bus was reset by another initiator"},
444 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
445 "3029: A device replacement has occurred"},
446 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
447 "4102: Device bus fabric performance degradation"},
448 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
449 "9051: IOA cache data exists for a missing or failed device"},
450 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
451 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
452 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
453 "9025: Disk unit is not supported at its physical location"},
454 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
455 "3020: IOA detected a SCSI bus configuration error"},
456 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
457 "3150: SCSI bus configuration error"},
458 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
459 "9074: Asymmetric advanced function disk configuration"},
460 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
461 "4040: Incomplete multipath connection between IOA and enclosure"},
462 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
463 "4041: Incomplete multipath connection between enclosure and device"},
464 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
465 "9075: Incomplete multipath connection between IOA and remote IOA"},
466 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
467 "9076: Configuration error, missing remote IOA"},
468 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
469 "4050: Enclosure does not support a required multipath function"},
470 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
471 "4121: Configuration error, required cable is missing"},
472 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
473 "4122: Cable is not plugged into the correct location on remote IOA"},
474 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
475 "4123: Configuration error, invalid cable vital product data"},
476 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
477 "4124: Configuration error, both cable ends are plugged into the same IOA"},
478 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
479 "4070: Logically bad block written on device"},
480 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
481 "9041: Array protection temporarily suspended"},
482 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
483 "9042: Corrupt array parity detected on specified device"},
484 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
485 "9030: Array no longer protected due to missing or failed disk unit"},
486 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
487 "9071: Link operational transition"},
488 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
489 "9072: Link not operational transition"},
490 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
491 "9032: Array exposed but still protected"},
492 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
493 "70DD: Device forced failed by disrupt device command"},
494 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
495 "4061: Multipath redundancy level got better"},
496 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
497 "4060: Multipath redundancy level got worse"},
498 {0x07270000, 0, 0,
499 "Failure due to other device"},
500 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
501 "9008: IOA does not support functions expected by devices"},
502 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
503 "9010: Cache data associated with attached devices cannot be found"},
504 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
505 "9011: Cache data belongs to devices other than those attached"},
506 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
507 "9020: Array missing 2 or more devices with only 1 device present"},
508 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
509 "9021: Array missing 2 or more devices with 2 or more devices present"},
510 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
511 "9022: Exposed array is missing a required device"},
512 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
513 "9023: Array member(s) not at required physical locations"},
514 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
515 "9024: Array not functional due to present hardware configuration"},
516 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
517 "9026: Array not functional due to present hardware configuration"},
518 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
519 "9027: Array is missing a device and parity is out of sync"},
520 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
521 "9028: Maximum number of arrays already exist"},
522 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
523 "9050: Required cache data cannot be located for a disk unit"},
524 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
525 "9052: Cache data exists for a device that has been modified"},
526 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
527 "9054: IOA resources not available due to previous problems"},
528 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
529 "9092: Disk unit requires initialization before use"},
530 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
531 "9029: Incorrect hardware configuration change has been detected"},
532 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
533 "9060: One or more disk pairs are missing from an array"},
534 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
535 "9061: One or more disks are missing from an array"},
536 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
537 "9062: One or more disks are missing from an array"},
538 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
539 "9063: Maximum number of functional arrays has been exceeded"},
540 {0x07279A00, 0, 0,
541 "Data protect, other volume set problem"},
542 {0x0B260000, 0, 0,
543 "Aborted command, invalid descriptor"},
544 {0x0B3F9000, 0, 0,
545 "Target operating conditions have changed, dual adapter takeover"},
546 {0x0B530200, 0, 0,
547 "Aborted command, medium removal prevented"},
548 {0x0B5A0000, 0, 0,
549 "Command terminated by host"},
550 {0x0B5B8000, 0, 0,
551 "Aborted command, command terminated by host"}
552 };
553
554 static const struct ipr_ses_table_entry ipr_ses_table[] = {
555 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
556 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
557 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
558 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
559 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
560 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
561 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
562 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
563 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
564 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
565 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
566 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
567 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
568 };
569
570 /*
571 * Function Prototypes
572 */
573 static int ipr_reset_alert(struct ipr_cmnd *);
574 static void ipr_process_ccn(struct ipr_cmnd *);
575 static void ipr_process_error(struct ipr_cmnd *);
576 static void ipr_reset_ioa_job(struct ipr_cmnd *);
577 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
578 enum ipr_shutdown_type);
579
580 #ifdef CONFIG_SCSI_IPR_TRACE
581 /**
582 * ipr_trc_hook - Add a trace entry to the driver trace
583 * @ipr_cmd: ipr command struct
584 * @type: trace type
585 * @add_data: additional data
586 *
587 * Return value:
588 * none
589 **/
590 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
591 u8 type, u32 add_data)
592 {
593 struct ipr_trace_entry *trace_entry;
594 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
595 unsigned int trace_index;
596
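	/* The driver trace is a fixed-size ring buffer; an atomic counter
	 * masked with IPR_TRACE_INDEX_MASK selects the next slot to fill. */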
597 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
598 trace_entry = &ioa_cfg->trace[trace_index];
599 trace_entry->time = jiffies;
600 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
601 trace_entry->type = type;
602 if (ipr_cmd->ioa_cfg->sis64)
603 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
604 else
605 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
606 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
607 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
608 trace_entry->u.add_data = add_data;
609 wmb();
610 }
611 #else
612 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
613 #endif
614
615 /**
616 * ipr_lock_and_done - Acquire lock and complete command
617 * @ipr_cmd: ipr command struct
618 *
619 * Return value:
620 * none
621 **/
622 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
623 {
624 unsigned long lock_flags;
625 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
626
627 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
628 ipr_cmd->done(ipr_cmd);
629 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
630 }
631
632 /**
633 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
634 * @ipr_cmd: ipr command struct
635 *
636 * Return value:
637 * none
638 **/
639 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
640 {
641 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
642 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
643 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
644 dma_addr_t dma_addr = ipr_cmd->dma_addr;
645 int hrrq_id;
646
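	/* Preserve the HRRQ assignment across the command packet reset below */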
647 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
648 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
649 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
650 ioarcb->data_transfer_length = 0;
651 ioarcb->read_data_transfer_length = 0;
652 ioarcb->ioadl_len = 0;
653 ioarcb->read_ioadl_len = 0;
654
655 if (ipr_cmd->ioa_cfg->sis64) {
656 ioarcb->u.sis64_addr_data.data_ioadl_addr =
657 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
658 ioasa64->u.gata.status = 0;
659 } else {
660 ioarcb->write_ioadl_addr =
661 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
662 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
663 ioasa->u.gata.status = 0;
664 }
665
666 ioasa->hdr.ioasc = 0;
667 ioasa->hdr.residual_data_len = 0;
668 ipr_cmd->scsi_cmd = NULL;
669 ipr_cmd->qc = NULL;
670 ipr_cmd->sense_buffer[0] = 0;
671 ipr_cmd->dma_use_sg = 0;
672 }
673
674 /**
675 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
676 * @ipr_cmd: ipr command struct
677 *
678 * Return value:
679 * none
680 **/
681 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
682 void (*fast_done) (struct ipr_cmnd *))
683 {
684 ipr_reinit_ipr_cmnd(ipr_cmd);
685 ipr_cmd->u.scratch = 0;
686 ipr_cmd->sibling = NULL;
687 ipr_cmd->eh_comp = NULL;
688 ipr_cmd->fast_done = fast_done;
689 init_timer(&ipr_cmd->timer);
690 }
691
692 /**
693 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
694 * @hrrq: hrr queue struct
695 *
696 * Return value:
697 * pointer to ipr command struct
698 **/
699 static
700 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
701 {
702 struct ipr_cmnd *ipr_cmd = NULL;
703
704 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
705 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
706 struct ipr_cmnd, queue);
707 list_del(&ipr_cmd->queue);
708 }
709
710
711 return ipr_cmd;
712 }
713
714 /**
715 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
716 * @ioa_cfg: ioa config struct
717 *
718 * Return value:
719 * pointer to ipr command struct
720 **/
721 static
722 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
723 {
724 struct ipr_cmnd *ipr_cmd =
725 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
726 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
727 return ipr_cmd;
728 }
729
730 /**
731 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
732 * @ioa_cfg: ioa config struct
733 * @clr_ints: interrupts to clear
734 *
735 * This function masks all interrupts on the adapter, then clears the
736 * interrupts specified in the mask
737 *
738 * Return value:
739 * none
740 **/
741 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
742 u32 clr_ints)
743 {
744 volatile u32 int_reg;
745 int i;
746
747 /* Stop new interrupts */
748 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
749 spin_lock(&ioa_cfg->hrrq[i]._lock);
750 ioa_cfg->hrrq[i].allow_interrupts = 0;
751 spin_unlock(&ioa_cfg->hrrq[i]._lock);
752 }
753 wmb();
754
755 /* Set interrupt mask to stop all new interrupts */
756 if (ioa_cfg->sis64)
757 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
758 else
759 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
760
761 /* Clear any pending interrupts */
762 if (ioa_cfg->sis64)
763 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
764 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
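	/* Read the sense register back; this also flushes the posted MMIO writes above */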
765 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
766 }
767
768 /**
769 * ipr_save_pcix_cmd_reg - Save PCI-X command register
770 * @ioa_cfg: ioa config struct
771 *
772 * Return value:
773 * 0 on success / -EIO on failure
774 **/
775 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
776 {
777 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
778
779 if (pcix_cmd_reg == 0)
780 return 0;
781
782 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
783 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
784 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
785 return -EIO;
786 }
787
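	/* Ensure data parity error recovery and relaxed ordering are enabled
	 * whenever the saved PCI-X command register is restored */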
788 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
789 return 0;
790 }
791
792 /**
793 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
794 * @ioa_cfg: ioa config struct
795 *
796 * Return value:
797 * 0 on success / -EIO on failure
798 **/
799 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
800 {
801 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
802
803 if (pcix_cmd_reg) {
804 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
805 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
806 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
807 return -EIO;
808 }
809 }
810
811 return 0;
812 }
813
814 /**
815 * ipr_sata_eh_done - done function for aborted SATA commands
816 * @ipr_cmd: ipr command struct
817 *
818 * This function is invoked for ops generated to SATA
819 * devices which are being aborted.
820 *
821 * Return value:
822 * none
823 **/
824 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
825 {
826 struct ata_queued_cmd *qc = ipr_cmd->qc;
827 struct ipr_sata_port *sata_port = qc->ap->private_data;
828
829 qc->err_mask |= AC_ERR_OTHER;
830 sata_port->ioasa.status |= ATA_BUSY;
831 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
832 ata_qc_complete(qc);
833 }
834
835 /**
836 * ipr_scsi_eh_done - mid-layer done function for aborted ops
837 * @ipr_cmd: ipr command struct
838 *
839 * This function is invoked by the interrupt handler for
840 * ops generated by the SCSI mid-layer which are being aborted.
841 *
842 * Return value:
843 * none
844 **/
845 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
846 {
847 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
848
849 scsi_cmd->result |= (DID_ERROR << 16);
850
851 scsi_dma_unmap(ipr_cmd->scsi_cmd);
852 scsi_cmd->scsi_done(scsi_cmd);
853 if (ipr_cmd->eh_comp)
854 complete(ipr_cmd->eh_comp);
855 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
856 }
857
858 /**
859 * ipr_fail_all_ops - Fails all outstanding ops.
860 * @ioa_cfg: ioa config struct
861 *
862 * This function fails all outstanding ops.
863 *
864 * Return value:
865 * none
866 **/
867 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
868 {
869 struct ipr_cmnd *ipr_cmd, *temp;
870 struct ipr_hrr_queue *hrrq;
871
872 ENTER;
873 for_each_hrrq(hrrq, ioa_cfg) {
874 spin_lock(&hrrq->_lock);
875 list_for_each_entry_safe(ipr_cmd,
876 temp, &hrrq->hrrq_pending_q, queue) {
877 list_del(&ipr_cmd->queue);
878
879 ipr_cmd->s.ioasa.hdr.ioasc =
880 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
881 ipr_cmd->s.ioasa.hdr.ilid =
882 cpu_to_be32(IPR_DRIVER_ILID);
883
884 if (ipr_cmd->scsi_cmd)
885 ipr_cmd->done = ipr_scsi_eh_done;
886 else if (ipr_cmd->qc)
887 ipr_cmd->done = ipr_sata_eh_done;
888
889 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
890 IPR_IOASC_IOA_WAS_RESET);
891 del_timer(&ipr_cmd->timer);
892 ipr_cmd->done(ipr_cmd);
893 }
894 spin_unlock(&hrrq->_lock);
895 }
896 LEAVE;
897 }
898
899 /**
900 * ipr_send_command - Send driver initiated requests.
901 * @ipr_cmd: ipr command struct
902 *
903 * This function sends a command to the adapter using the correct write call.
904 * In the case of sis64, calculate the ioarcb size required. Then or in the
905 * appropriate bits.
906 *
907 * Return value:
908 * none
909 **/
910 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
911 {
912 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
913 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
914
915 if (ioa_cfg->sis64) {
916 /* The default size is 256 bytes */
917 send_dma_addr |= 0x1;
918
919 /* If the number of ioadls * size of ioadl > 128 bytes,
920 then use a 512 byte ioarcb */
921 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
922 send_dma_addr |= 0x4;
923 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
924 } else
925 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
926 }
927
928 /**
929 * ipr_do_req - Send driver initiated requests.
930 * @ipr_cmd: ipr command struct
931 * @done: done function
932 * @timeout_func: timeout function
933 * @timeout: timeout value
934 *
935 * This function sends the specified command to the adapter with the
936 * timeout given. The done function is invoked on command completion.
937 *
938 * Return value:
939 * none
940 **/
941 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
942 void (*done) (struct ipr_cmnd *),
943 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
944 {
945 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
946
947 ipr_cmd->done = done;
948
949 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
950 ipr_cmd->timer.expires = jiffies + timeout;
951 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
952
953 add_timer(&ipr_cmd->timer);
954
955 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
956
957 ipr_send_command(ipr_cmd);
958 }
959
960 /**
961 * ipr_internal_cmd_done - Op done function for an internally generated op.
962 * @ipr_cmd: ipr command struct
963 *
964 * This function is the op done function for an internally generated,
965 * blocking op. It simply wakes the sleeping thread.
966 *
967 * Return value:
968 * none
969 **/
970 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
971 {
972 if (ipr_cmd->sibling)
973 ipr_cmd->sibling = NULL;
974 else
975 complete(&ipr_cmd->completion);
976 }
977
978 /**
979 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
980 * @ipr_cmd: ipr command struct
981 * @dma_addr: dma address
982 * @len: transfer length
983 * @flags: ioadl flag value
984 *
985 * This function initializes an ioadl in the case where there is only a single
986 * descriptor.
987 *
988 * Return value:
989 * nothing
990 **/
991 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
992 u32 len, int flags)
993 {
994 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
995 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
996
997 ipr_cmd->dma_use_sg = 1;
998
999 if (ipr_cmd->ioa_cfg->sis64) {
1000 ioadl64->flags = cpu_to_be32(flags);
1001 ioadl64->data_len = cpu_to_be32(len);
1002 ioadl64->address = cpu_to_be64(dma_addr);
1003
1004 ipr_cmd->ioarcb.ioadl_len =
1005 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1006 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1007 } else {
1008 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1009 ioadl->address = cpu_to_be32(dma_addr);
1010
1011 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1012 ipr_cmd->ioarcb.read_ioadl_len =
1013 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1014 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1015 } else {
1016 ipr_cmd->ioarcb.ioadl_len =
1017 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1018 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1019 }
1020 }
1021 }
1022
1023 /**
1024 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1025 * @ipr_cmd: ipr command struct
1026 * @timeout_func: function to invoke if command times out
1027 * @timeout: timeout
1028 *
1029 * Return value:
1030 * none
1031 **/
1032 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1033 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1034 u32 timeout)
1035 {
1036 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1037
1038 init_completion(&ipr_cmd->completion);
1039 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1040
1041 spin_unlock_irq(ioa_cfg->host->host_lock);
1042 wait_for_completion(&ipr_cmd->completion);
1043 spin_lock_irq(ioa_cfg->host->host_lock);
1044 }
1045
1046 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1047 {
1048 unsigned int hrrq;
1049
1050 if (ioa_cfg->hrrq_num == 1)
1051 hrrq = 0;
1052 else {
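		/* HRRQ 0 is reserved for internal and initialization commands;
		 * round-robin normal I/O across the remaining queues. */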
1053 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1054 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1055 }
1056 return hrrq;
1057 }
1058
1059 /**
1060 * ipr_send_hcam - Send an HCAM to the adapter.
1061 * @ioa_cfg: ioa config struct
1062 * @type: HCAM type
1063 * @hostrcb: hostrcb struct
1064 *
1065 * This function will send a Host Controlled Async command to the adapter.
1066 * If HCAMs are currently not allowed to be issued to the adapter, it will
1067 * place the hostrcb on the free queue.
1068 *
1069 * Return value:
1070 * none
1071 **/
1072 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1073 struct ipr_hostrcb *hostrcb)
1074 {
1075 struct ipr_cmnd *ipr_cmd;
1076 struct ipr_ioarcb *ioarcb;
1077
1078 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1079 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1080 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1081 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1082
1083 ipr_cmd->u.hostrcb = hostrcb;
1084 ioarcb = &ipr_cmd->ioarcb;
1085
1086 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1087 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1088 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1089 ioarcb->cmd_pkt.cdb[1] = type;
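		/* CDB bytes 7-8 carry the hostrcb allocation length, big endian */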
1090 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1091 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1092
1093 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1094 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1095
1096 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1097 ipr_cmd->done = ipr_process_ccn;
1098 else
1099 ipr_cmd->done = ipr_process_error;
1100
1101 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1102
1103 ipr_send_command(ipr_cmd);
1104 } else {
1105 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1106 }
1107 }
1108
1109 /**
1110 * ipr_update_ata_class - Update the ata class in the resource entry
1111 * @res: resource entry struct
1112 * @proto: cfgte device bus protocol value
1113 *
1114 * Return value:
1115 * none
1116 **/
1117 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1118 {
1119 switch (proto) {
1120 case IPR_PROTO_SATA:
1121 case IPR_PROTO_SAS_STP:
1122 res->ata_class = ATA_DEV_ATA;
1123 break;
1124 case IPR_PROTO_SATA_ATAPI:
1125 case IPR_PROTO_SAS_STP_ATAPI:
1126 res->ata_class = ATA_DEV_ATAPI;
1127 break;
1128 default:
1129 res->ata_class = ATA_DEV_UNKNOWN;
1130 break;
1131 	}
1132 }
1133
1134 /**
1135 * ipr_init_res_entry - Initialize a resource entry struct.
1136 * @res: resource entry struct
1137 * @cfgtew: config table entry wrapper struct
1138 *
1139 * Return value:
1140 * none
1141 **/
1142 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1143 struct ipr_config_table_entry_wrapper *cfgtew)
1144 {
1145 int found = 0;
1146 unsigned int proto;
1147 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1148 struct ipr_resource_entry *gscsi_res = NULL;
1149
1150 res->needs_sync_complete = 0;
1151 res->in_erp = 0;
1152 res->add_to_ml = 0;
1153 res->del_from_ml = 0;
1154 res->resetting_device = 0;
1155 res->reset_occurred = 0;
1156 res->sdev = NULL;
1157 res->sata_port = NULL;
1158
1159 if (ioa_cfg->sis64) {
1160 proto = cfgtew->u.cfgte64->proto;
1161 res->res_flags = cfgtew->u.cfgte64->res_flags;
1162 res->qmodel = IPR_QUEUEING_MODEL64(res);
1163 res->type = cfgtew->u.cfgte64->res_type;
1164
1165 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1166 sizeof(res->res_path));
1167
1168 res->bus = 0;
1169 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1170 sizeof(res->dev_lun.scsi_lun));
1171 res->lun = scsilun_to_int(&res->dev_lun);
1172
1173 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1174 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1175 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1176 found = 1;
1177 res->target = gscsi_res->target;
1178 break;
1179 }
1180 }
1181 if (!found) {
1182 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1183 ioa_cfg->max_devs_supported);
1184 set_bit(res->target, ioa_cfg->target_ids);
1185 }
1186 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1187 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1188 res->target = 0;
1189 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1190 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1191 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1192 ioa_cfg->max_devs_supported);
1193 set_bit(res->target, ioa_cfg->array_ids);
1194 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1195 res->bus = IPR_VSET_VIRTUAL_BUS;
1196 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1197 ioa_cfg->max_devs_supported);
1198 set_bit(res->target, ioa_cfg->vset_ids);
1199 } else {
1200 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1201 ioa_cfg->max_devs_supported);
1202 set_bit(res->target, ioa_cfg->target_ids);
1203 }
1204 } else {
1205 proto = cfgtew->u.cfgte->proto;
1206 res->qmodel = IPR_QUEUEING_MODEL(res);
1207 res->flags = cfgtew->u.cfgte->flags;
1208 if (res->flags & IPR_IS_IOA_RESOURCE)
1209 res->type = IPR_RES_TYPE_IOAFP;
1210 else
1211 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1212
1213 res->bus = cfgtew->u.cfgte->res_addr.bus;
1214 res->target = cfgtew->u.cfgte->res_addr.target;
1215 res->lun = cfgtew->u.cfgte->res_addr.lun;
1216 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1217 }
1218
1219 ipr_update_ata_class(res, proto);
1220 }
1221
1222 /**
1223 * ipr_is_same_device - Determine if two devices are the same.
1224 * @res: resource entry struct
1225 * @cfgtew: config table entry wrapper struct
1226 *
1227 * Return value:
1228 * 1 if the devices are the same / 0 otherwise
1229 **/
1230 static int ipr_is_same_device(struct ipr_resource_entry *res,
1231 struct ipr_config_table_entry_wrapper *cfgtew)
1232 {
1233 if (res->ioa_cfg->sis64) {
1234 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1235 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1236 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1237 sizeof(cfgtew->u.cfgte64->lun))) {
1238 return 1;
1239 }
1240 } else {
1241 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1242 res->target == cfgtew->u.cfgte->res_addr.target &&
1243 res->lun == cfgtew->u.cfgte->res_addr.lun)
1244 return 1;
1245 }
1246
1247 return 0;
1248 }
1249
1250 /**
1251 * __ipr_format_res_path - Format the resource path for printing.
1252 * @res_path: resource path
1253 * @buf: buffer
1254 * @len: length of buffer provided
1255 *
1256 * Return value:
1257 * pointer to buffer
1258 **/
1259 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1260 {
1261 int i;
1262 char *p = buffer;
1263
1264 *p = '\0';
1265 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
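	/* Each additional path element prints as "-XX"; 0xFF terminates the path */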
1266 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1267 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1268
1269 return buffer;
1270 }
1271
1272 /**
1273 * ipr_format_res_path - Format the resource path for printing.
1274 * @ioa_cfg: ioa config struct
1275 * @res_path: resource path
1276 * @buf: buffer
1277 * @len: length of buffer provided
1278 *
1279 * Return value:
1280 * pointer to buffer
1281 **/
1282 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1283 u8 *res_path, char *buffer, int len)
1284 {
1285 char *p = buffer;
1286
1287 *p = '\0';
1288 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1289 	__ipr_format_res_path(res_path, p, len - (p - buffer));
1290 return buffer;
1291 }
1292
1293 /**
1294 * ipr_update_res_entry - Update the resource entry.
1295 * @res: resource entry struct
1296 * @cfgtew: config table entry wrapper struct
1297 *
1298 * Return value:
1299 * none
1300 **/
1301 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1302 struct ipr_config_table_entry_wrapper *cfgtew)
1303 {
1304 char buffer[IPR_MAX_RES_PATH_LENGTH];
1305 unsigned int proto;
1306 int new_path = 0;
1307
1308 if (res->ioa_cfg->sis64) {
1309 res->flags = cfgtew->u.cfgte64->flags;
1310 res->res_flags = cfgtew->u.cfgte64->res_flags;
1311 res->type = cfgtew->u.cfgte64->res_type;
1312
1313 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1314 sizeof(struct ipr_std_inq_data));
1315
1316 res->qmodel = IPR_QUEUEING_MODEL64(res);
1317 proto = cfgtew->u.cfgte64->proto;
1318 res->res_handle = cfgtew->u.cfgte64->res_handle;
1319 res->dev_id = cfgtew->u.cfgte64->dev_id;
1320
1321 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1322 sizeof(res->dev_lun.scsi_lun));
1323
1324 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1325 sizeof(res->res_path))) {
1326 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1327 sizeof(res->res_path));
1328 new_path = 1;
1329 }
1330
1331 if (res->sdev && new_path)
1332 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1333 ipr_format_res_path(res->ioa_cfg,
1334 res->res_path, buffer, sizeof(buffer)));
1335 } else {
1336 res->flags = cfgtew->u.cfgte->flags;
1337 if (res->flags & IPR_IS_IOA_RESOURCE)
1338 res->type = IPR_RES_TYPE_IOAFP;
1339 else
1340 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1341
1342 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1343 sizeof(struct ipr_std_inq_data));
1344
1345 res->qmodel = IPR_QUEUEING_MODEL(res);
1346 proto = cfgtew->u.cfgte->proto;
1347 res->res_handle = cfgtew->u.cfgte->res_handle;
1348 }
1349
1350 ipr_update_ata_class(res, proto);
1351 }
1352
1353 /**
1354 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1355 * for the resource.
1356 * @res: resource entry struct
1358 *
1359 * Return value:
1360 * none
1361 **/
1362 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1363 {
1364 struct ipr_resource_entry *gscsi_res = NULL;
1365 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1366
1367 if (!ioa_cfg->sis64)
1368 return;
1369
1370 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1371 clear_bit(res->target, ioa_cfg->array_ids);
1372 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1373 clear_bit(res->target, ioa_cfg->vset_ids);
1374 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
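		/* Only release the target id if no other resource shares this dev_id */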
1375 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1376 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1377 return;
1378 clear_bit(res->target, ioa_cfg->target_ids);
1379
1380 } else if (res->bus == 0)
1381 clear_bit(res->target, ioa_cfg->target_ids);
1382 }
1383
1384 /**
1385 * ipr_handle_config_change - Handle a config change from the adapter
1386 * @ioa_cfg: ioa config struct
1387 * @hostrcb: hostrcb
1388 *
1389 * Return value:
1390 * none
1391 **/
1392 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1393 struct ipr_hostrcb *hostrcb)
1394 {
1395 struct ipr_resource_entry *res = NULL;
1396 struct ipr_config_table_entry_wrapper cfgtew;
1397 __be32 cc_res_handle;
1398
1399 u32 is_ndn = 1;
1400
1401 if (ioa_cfg->sis64) {
1402 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1403 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1404 } else {
1405 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1406 cc_res_handle = cfgtew.u.cfgte->res_handle;
1407 }
1408
1409 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1410 if (res->res_handle == cc_res_handle) {
1411 is_ndn = 0;
1412 break;
1413 }
1414 }
1415
1416 if (is_ndn) {
1417 if (list_empty(&ioa_cfg->free_res_q)) {
1418 ipr_send_hcam(ioa_cfg,
1419 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1420 hostrcb);
1421 return;
1422 }
1423
1424 res = list_entry(ioa_cfg->free_res_q.next,
1425 struct ipr_resource_entry, queue);
1426
1427 list_del(&res->queue);
1428 ipr_init_res_entry(res, &cfgtew);
1429 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1430 }
1431
1432 ipr_update_res_entry(res, &cfgtew);
1433
1434 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1435 if (res->sdev) {
1436 res->del_from_ml = 1;
1437 res->res_handle = IPR_INVALID_RES_HANDLE;
1438 if (ioa_cfg->allow_ml_add_del)
1439 schedule_work(&ioa_cfg->work_q);
1440 } else {
1441 ipr_clear_res_target(res);
1442 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1443 }
1444 } else if (!res->sdev || res->del_from_ml) {
1445 res->add_to_ml = 1;
1446 if (ioa_cfg->allow_ml_add_del)
1447 schedule_work(&ioa_cfg->work_q);
1448 }
1449
1450 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1451 }
1452
1453 /**
1454 * ipr_process_ccn - Op done function for a CCN.
1455 * @ipr_cmd: ipr command struct
1456 *
1457 * This function is the op done function for a configuration
1458 * change notification host controlled async from the adapter.
1459 *
1460 * Return value:
1461 * none
1462 **/
1463 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1464 {
1465 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1466 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1467 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1468
1469 list_del(&hostrcb->queue);
1470 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1471
1472 if (ioasc) {
1473 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1474 dev_err(&ioa_cfg->pdev->dev,
1475 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1476
1477 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1478 } else {
1479 ipr_handle_config_change(ioa_cfg, hostrcb);
1480 }
1481 }
1482
1483 /**
1484 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1485 * @i: index into buffer
1486 * @buf: string to modify
1487 *
1488 * This function will strip all trailing whitespace, pad the end
1489 * of the string with a single space, and NULL terminate the string.
1490 *
1491 * Return value:
1492 * new length of string
1493 **/
1494 static int strip_and_pad_whitespace(int i, char *buf)
1495 {
1496 while (i && buf[i] == ' ')
1497 i--;
1498 buf[i+1] = ' ';
1499 buf[i+2] = '\0';
1500 return i + 2;
1501 }
1502
1503 /**
1504 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1505 * @prefix: string to print at start of printk
1506 * @hostrcb: hostrcb pointer
1507 * @vpd: vendor/product id/sn struct
1508 *
1509 * Return value:
1510 * none
1511 **/
1512 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1513 struct ipr_vpd *vpd)
1514 {
1515 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1516 int i = 0;
1517
1518 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1519 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1520
1521 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1522 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1523
1524 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1525 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1526
1527 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1528 }
1529
1530 /**
1531 * ipr_log_vpd - Log the passed VPD to the error log.
1532 * @vpd: vendor/product id/sn struct
1533 *
1534 * Return value:
1535 * none
1536 **/
1537 static void ipr_log_vpd(struct ipr_vpd *vpd)
1538 {
1539 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1540 + IPR_SERIAL_NUM_LEN];
1541
1542 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1543 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1544 IPR_PROD_ID_LEN);
1545 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1546 ipr_err("Vendor/Product ID: %s\n", buffer);
1547
1548 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1549 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1550 ipr_err(" Serial Number: %s\n", buffer);
1551 }
1552
1553 /**
1554 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1555 * @prefix: string to print at start of printk
1556 * @hostrcb: hostrcb pointer
1557 * @vpd: vendor/product id/sn/wwn struct
1558 *
1559 * Return value:
1560 * none
1561 **/
1562 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563 struct ipr_ext_vpd *vpd)
1564 {
1565 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1566 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1567 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1568 }
1569
1570 /**
1571 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1572 * @vpd: vendor/product id/sn/wwn struct
1573 *
1574 * Return value:
1575 * none
1576 **/
1577 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1578 {
1579 ipr_log_vpd(&vpd->vpd);
1580 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1581 be32_to_cpu(vpd->wwid[1]));
1582 }
1583
1584 /**
1585 * ipr_log_enhanced_cache_error - Log a cache error.
1586 * @ioa_cfg: ioa config struct
1587 * @hostrcb: hostrcb struct
1588 *
1589 * Return value:
1590 * none
1591 **/
1592 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1593 struct ipr_hostrcb *hostrcb)
1594 {
1595 struct ipr_hostrcb_type_12_error *error;
1596
1597 if (ioa_cfg->sis64)
1598 error = &hostrcb->hcam.u.error64.u.type_12_error;
1599 else
1600 error = &hostrcb->hcam.u.error.u.type_12_error;
1601
1602 ipr_err("-----Current Configuration-----\n");
1603 ipr_err("Cache Directory Card Information:\n");
1604 ipr_log_ext_vpd(&error->ioa_vpd);
1605 ipr_err("Adapter Card Information:\n");
1606 ipr_log_ext_vpd(&error->cfc_vpd);
1607
1608 ipr_err("-----Expected Configuration-----\n");
1609 ipr_err("Cache Directory Card Information:\n");
1610 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1611 ipr_err("Adapter Card Information:\n");
1612 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1613
1614 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1615 be32_to_cpu(error->ioa_data[0]),
1616 be32_to_cpu(error->ioa_data[1]),
1617 be32_to_cpu(error->ioa_data[2]));
1618 }
1619
1620 /**
1621 * ipr_log_cache_error - Log a cache error.
1622 * @ioa_cfg: ioa config struct
1623 * @hostrcb: hostrcb struct
1624 *
1625 * Return value:
1626 * none
1627 **/
1628 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1629 struct ipr_hostrcb *hostrcb)
1630 {
1631 struct ipr_hostrcb_type_02_error *error =
1632 &hostrcb->hcam.u.error.u.type_02_error;
1633
1634 ipr_err("-----Current Configuration-----\n");
1635 ipr_err("Cache Directory Card Information:\n");
1636 ipr_log_vpd(&error->ioa_vpd);
1637 ipr_err("Adapter Card Information:\n");
1638 ipr_log_vpd(&error->cfc_vpd);
1639
1640 ipr_err("-----Expected Configuration-----\n");
1641 ipr_err("Cache Directory Card Information:\n");
1642 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1643 ipr_err("Adapter Card Information:\n");
1644 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1645
1646 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1647 be32_to_cpu(error->ioa_data[0]),
1648 be32_to_cpu(error->ioa_data[1]),
1649 be32_to_cpu(error->ioa_data[2]));
1650 }
1651
1652 /**
1653 * ipr_log_enhanced_config_error - Log a configuration error.
1654 * @ioa_cfg: ioa config struct
1655 * @hostrcb: hostrcb struct
1656 *
1657 * Return value:
1658 * none
1659 **/
1660 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1661 struct ipr_hostrcb *hostrcb)
1662 {
1663 int errors_logged, i;
1664 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1665 struct ipr_hostrcb_type_13_error *error;
1666
1667 error = &hostrcb->hcam.u.error.u.type_13_error;
1668 errors_logged = be32_to_cpu(error->errors_logged);
1669
1670 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1671 be32_to_cpu(error->errors_detected), errors_logged);
1672
1673 dev_entry = error->dev;
1674
1675 for (i = 0; i < errors_logged; i++, dev_entry++) {
1676 ipr_err_separator;
1677
1678 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1679 ipr_log_ext_vpd(&dev_entry->vpd);
1680
1681 ipr_err("-----New Device Information-----\n");
1682 ipr_log_ext_vpd(&dev_entry->new_vpd);
1683
1684 ipr_err("Cache Directory Card Information:\n");
1685 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1686
1687 ipr_err("Adapter Card Information:\n");
1688 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1689 }
1690 }
1691
1692 /**
1693 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1694 * @ioa_cfg: ioa config struct
1695 * @hostrcb: hostrcb struct
1696 *
1697 * Return value:
1698 * none
1699 **/
1700 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1701 struct ipr_hostrcb *hostrcb)
1702 {
1703 int errors_logged, i;
1704 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1705 struct ipr_hostrcb_type_23_error *error;
1706 char buffer[IPR_MAX_RES_PATH_LENGTH];
1707
1708 error = &hostrcb->hcam.u.error64.u.type_23_error;
1709 errors_logged = be32_to_cpu(error->errors_logged);
1710
1711 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1712 be32_to_cpu(error->errors_detected), errors_logged);
1713
1714 dev_entry = error->dev;
1715
1716 for (i = 0; i < errors_logged; i++, dev_entry++) {
1717 ipr_err_separator;
1718
1719 ipr_err("Device %d : %s", i + 1,
1720 __ipr_format_res_path(dev_entry->res_path,
1721 buffer, sizeof(buffer)));
1722 ipr_log_ext_vpd(&dev_entry->vpd);
1723
1724 ipr_err("-----New Device Information-----\n");
1725 ipr_log_ext_vpd(&dev_entry->new_vpd);
1726
1727 ipr_err("Cache Directory Card Information:\n");
1728 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1729
1730 ipr_err("Adapter Card Information:\n");
1731 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1732 }
1733 }
1734
1735 /**
1736 * ipr_log_config_error - Log a configuration error.
1737 * @ioa_cfg: ioa config struct
1738 * @hostrcb: hostrcb struct
1739 *
1740 * Return value:
1741 * none
1742 **/
1743 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1744 struct ipr_hostrcb *hostrcb)
1745 {
1746 int errors_logged, i;
1747 struct ipr_hostrcb_device_data_entry *dev_entry;
1748 struct ipr_hostrcb_type_03_error *error;
1749
1750 error = &hostrcb->hcam.u.error.u.type_03_error;
1751 errors_logged = be32_to_cpu(error->errors_logged);
1752
1753 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1754 be32_to_cpu(error->errors_detected), errors_logged);
1755
1756 dev_entry = error->dev;
1757
1758 for (i = 0; i < errors_logged; i++, dev_entry++) {
1759 ipr_err_separator;
1760
1761 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1762 ipr_log_vpd(&dev_entry->vpd);
1763
1764 ipr_err("-----New Device Information-----\n");
1765 ipr_log_vpd(&dev_entry->new_vpd);
1766
1767 ipr_err("Cache Directory Card Information:\n");
1768 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1769
1770 ipr_err("Adapter Card Information:\n");
1771 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1772
1773 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1774 be32_to_cpu(dev_entry->ioa_data[0]),
1775 be32_to_cpu(dev_entry->ioa_data[1]),
1776 be32_to_cpu(dev_entry->ioa_data[2]),
1777 be32_to_cpu(dev_entry->ioa_data[3]),
1778 be32_to_cpu(dev_entry->ioa_data[4]));
1779 }
1780 }
1781
1782 /**
1783 * ipr_log_enhanced_array_error - Log an array configuration error.
1784 * @ioa_cfg: ioa config struct
1785 * @hostrcb: hostrcb struct
1786 *
1787 * Return value:
1788 * none
1789 **/
1790 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1791 struct ipr_hostrcb *hostrcb)
1792 {
1793 int i, num_entries;
1794 struct ipr_hostrcb_type_14_error *error;
1795 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1796 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1797
1798 error = &hostrcb->hcam.u.error.u.type_14_error;
1799
1800 ipr_err_separator;
1801
1802 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1803 error->protection_level,
1804 ioa_cfg->host->host_no,
1805 error->last_func_vset_res_addr.bus,
1806 error->last_func_vset_res_addr.target,
1807 error->last_func_vset_res_addr.lun);
1808
1809 ipr_err_separator;
1810
1811 array_entry = error->array_member;
1812 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1813 ARRAY_SIZE(error->array_member));
1814
1815 for (i = 0; i < num_entries; i++, array_entry++) {
1816 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1817 continue;
1818
1819 if (be32_to_cpu(error->exposed_mode_adn) == i)
1820 ipr_err("Exposed Array Member %d:\n", i);
1821 else
1822 ipr_err("Array Member %d:\n", i);
1823
1824 ipr_log_ext_vpd(&array_entry->vpd);
1825 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1826 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1827 "Expected Location");
1828
1829 ipr_err_separator;
1830 }
1831 }
1832
1833 /**
1834 * ipr_log_array_error - Log an array configuration error.
1835 * @ioa_cfg: ioa config struct
1836 * @hostrcb: hostrcb struct
1837 *
1838 * Return value:
1839 * none
1840 **/
1841 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842 struct ipr_hostrcb *hostrcb)
1843 {
1844 int i;
1845 struct ipr_hostrcb_type_04_error *error;
1846 struct ipr_hostrcb_array_data_entry *array_entry;
1847 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1848
1849 error = &hostrcb->hcam.u.error.u.type_04_error;
1850
1851 ipr_err_separator;
1852
1853 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854 error->protection_level,
1855 ioa_cfg->host->host_no,
1856 error->last_func_vset_res_addr.bus,
1857 error->last_func_vset_res_addr.target,
1858 error->last_func_vset_res_addr.lun);
1859
1860 ipr_err_separator;
1861
1862 array_entry = error->array_member;
1863
1864 for (i = 0; i < 18; i++) {
1865 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1866 continue;
1867
1868 if (be32_to_cpu(error->exposed_mode_adn) == i)
1869 ipr_err("Exposed Array Member %d:\n", i);
1870 else
1871 ipr_err("Array Member %d:\n", i);
1872
1873 ipr_log_vpd(&array_entry->vpd);
1874
1875 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877 "Expected Location");
1878
1879 ipr_err_separator;
1880
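/* The first 10 members are read from array_member; the remainder come from array_member2. */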
1881 if (i == 9)
1882 array_entry = error->array_member2;
1883 else
1884 array_entry++;
1885 }
1886 }
1887
1888 /**
1889 * ipr_log_hex_data - Log additional hex IOA error data.
1890 * @ioa_cfg: ioa config struct
1891 * @data: IOA error data
1892 * @len: data length
1893 *
1894 * Return value:
1895 * none
1896 **/
1897 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1898 {
1899 int i;
1900
1901 if (len == 0)
1902 return;
1903
1904 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1905 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1906
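/* Dump four 32-bit words per line, each line prefixed with its byte offset. */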
1907 for (i = 0; i < len / 4; i += 4) {
1908 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1909 be32_to_cpu(data[i]),
1910 be32_to_cpu(data[i+1]),
1911 be32_to_cpu(data[i+2]),
1912 be32_to_cpu(data[i+3]));
1913 }
1914 }
1915
1916 /**
1917 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1918 * @ioa_cfg: ioa config struct
1919 * @hostrcb: hostrcb struct
1920 *
1921 * Return value:
1922 * none
1923 **/
1924 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1925 struct ipr_hostrcb *hostrcb)
1926 {
1927 struct ipr_hostrcb_type_17_error *error;
1928
1929 if (ioa_cfg->sis64)
1930 error = &hostrcb->hcam.u.error64.u.type_17_error;
1931 else
1932 error = &hostrcb->hcam.u.error.u.type_17_error;
1933
1934 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1935 strim(error->failure_reason);
1936
1937 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1938 be32_to_cpu(hostrcb->hcam.u.error.prc));
1939 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
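/* Dump whatever data follows the fixed portion of the type 17 error as hex. */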
1940 ipr_log_hex_data(ioa_cfg, error->data,
1941 be32_to_cpu(hostrcb->hcam.length) -
1942 (offsetof(struct ipr_hostrcb_error, u) +
1943 offsetof(struct ipr_hostrcb_type_17_error, data)));
1944 }
1945
1946 /**
1947 * ipr_log_dual_ioa_error - Log a dual adapter error.
1948 * @ioa_cfg: ioa config struct
1949 * @hostrcb: hostrcb struct
1950 *
1951 * Return value:
1952 * none
1953 **/
1954 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1955 struct ipr_hostrcb *hostrcb)
1956 {
1957 struct ipr_hostrcb_type_07_error *error;
1958
1959 error = &hostrcb->hcam.u.error.u.type_07_error;
1960 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1961 strim(error->failure_reason);
1962
1963 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1964 be32_to_cpu(hostrcb->hcam.u.error.prc));
1965 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1966 ipr_log_hex_data(ioa_cfg, error->data,
1967 be32_to_cpu(hostrcb->hcam.length) -
1968 (offsetof(struct ipr_hostrcb_error, u) +
1969 offsetof(struct ipr_hostrcb_type_07_error, data)));
1970 }
1971
1972 static const struct {
1973 u8 active;
1974 char *desc;
1975 } path_active_desc[] = {
1976 { IPR_PATH_NO_INFO, "Path" },
1977 { IPR_PATH_ACTIVE, "Active path" },
1978 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1979 };
1980
1981 static const struct {
1982 u8 state;
1983 char *desc;
1984 } path_state_desc[] = {
1985 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1986 { IPR_PATH_HEALTHY, "is healthy" },
1987 { IPR_PATH_DEGRADED, "is degraded" },
1988 { IPR_PATH_FAILED, "is failed" }
1989 };
1990
1991 /**
1992 * ipr_log_fabric_path - Log a fabric path error
1993 * @hostrcb: hostrcb struct
1994 * @fabric: fabric descriptor
1995 *
1996 * Return value:
1997 * none
1998 **/
1999 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2000 struct ipr_hostrcb_fabric_desc *fabric)
2001 {
2002 int i, j;
2003 u8 path_state = fabric->path_state;
2004 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2005 u8 state = path_state & IPR_PATH_STATE_MASK;
2006
2007 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2008 if (path_active_desc[i].active != active)
2009 continue;
2010
2011 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2012 if (path_state_desc[j].state != state)
2013 continue;
2014
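/*
 * A cascaded expander or phy value of 0xff is treated as absent and is
 * left out of the logged message.
 */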
2015 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2016 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2017 path_active_desc[i].desc, path_state_desc[j].desc,
2018 fabric->ioa_port);
2019 } else if (fabric->cascaded_expander == 0xff) {
2020 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2021 path_active_desc[i].desc, path_state_desc[j].desc,
2022 fabric->ioa_port, fabric->phy);
2023 } else if (fabric->phy == 0xff) {
2024 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2025 path_active_desc[i].desc, path_state_desc[j].desc,
2026 fabric->ioa_port, fabric->cascaded_expander);
2027 } else {
2028 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2029 path_active_desc[i].desc, path_state_desc[j].desc,
2030 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2031 }
2032 return;
2033 }
2034 }
2035
2036 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2037 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2038 }
2039
2040 /**
2041 * ipr_log64_fabric_path - Log a fabric path error
2042 * @hostrcb: hostrcb struct
2043 * @fabric: fabric descriptor
2044 *
2045 * Return value:
2046 * none
2047 **/
2048 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2049 struct ipr_hostrcb64_fabric_desc *fabric)
2050 {
2051 int i, j;
2052 u8 path_state = fabric->path_state;
2053 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2054 u8 state = path_state & IPR_PATH_STATE_MASK;
2055 char buffer[IPR_MAX_RES_PATH_LENGTH];
2056
2057 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058 if (path_active_desc[i].active != active)
2059 continue;
2060
2061 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062 if (path_state_desc[j].state != state)
2063 continue;
2064
2065 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2066 path_active_desc[i].desc, path_state_desc[j].desc,
2067 ipr_format_res_path(hostrcb->ioa_cfg,
2068 fabric->res_path,
2069 buffer, sizeof(buffer)));
2070 return;
2071 }
2072 }
2073
2074 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2075 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2076 buffer, sizeof(buffer)));
2077 }
2078
2079 static const struct {
2080 u8 type;
2081 char *desc;
2082 } path_type_desc[] = {
2083 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2084 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2085 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2086 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2087 };
2088
2089 static const struct {
2090 u8 status;
2091 char *desc;
2092 } path_status_desc[] = {
2093 { IPR_PATH_CFG_NO_PROB, "Functional" },
2094 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2095 { IPR_PATH_CFG_FAILED, "Failed" },
2096 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2097 { IPR_PATH_NOT_DETECTED, "Missing" },
2098 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2099 };
2100
2101 static const char *link_rate[] = {
2102 "unknown",
2103 "disabled",
2104 "phy reset problem",
2105 "spinup hold",
2106 "port selector",
2107 "unknown",
2108 "unknown",
2109 "unknown",
2110 "1.5Gbps",
2111 "3.0Gbps",
2112 "unknown",
2113 "unknown",
2114 "unknown",
2115 "unknown",
2116 "unknown",
2117 "unknown"
2118 };
2119
2120 /**
2121 * ipr_log_path_elem - Log a fabric path element.
2122 * @hostrcb: hostrcb struct
2123 * @cfg: fabric path element struct
2124 *
2125 * Return value:
2126 * none
2127 **/
2128 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2129 struct ipr_hostrcb_config_element *cfg)
2130 {
2131 int i, j;
2132 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2133 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2134
2135 if (type == IPR_PATH_CFG_NOT_EXIST)
2136 return;
2137
2138 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2139 if (path_type_desc[i].type != type)
2140 continue;
2141
2142 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2143 if (path_status_desc[j].status != status)
2144 continue;
2145
2146 if (type == IPR_PATH_CFG_IOA_PORT) {
2147 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2148 path_status_desc[j].desc, path_type_desc[i].desc,
2149 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2150 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2151 } else {
2152 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2153 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2154 path_status_desc[j].desc, path_type_desc[i].desc,
2155 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2156 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2157 } else if (cfg->cascaded_expander == 0xff) {
2158 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2159 "WWN=%08X%08X\n", path_status_desc[j].desc,
2160 path_type_desc[i].desc, cfg->phy,
2161 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2162 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2163 } else if (cfg->phy == 0xff) {
2164 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2165 "WWN=%08X%08X\n", path_status_desc[j].desc,
2166 path_type_desc[i].desc, cfg->cascaded_expander,
2167 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2168 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2169 } else {
2170 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2171 "WWN=%08X%08X\n", path_status_desc[j].desc,
2172 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2173 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2174 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2175 }
2176 }
2177 return;
2178 }
2179 }
2180
2181 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2182 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2183 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2184 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2185 }
2186
2187 /**
2188 * ipr_log64_path_elem - Log a fabric path element.
2189 * @hostrcb: hostrcb struct
2190 * @cfg: fabric path element struct
2191 *
2192 * Return value:
2193 * none
2194 **/
2195 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2196 struct ipr_hostrcb64_config_element *cfg)
2197 {
2198 int i, j;
2199 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2200 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2201 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2202 char buffer[IPR_MAX_RES_PATH_LENGTH];
2203
2204 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2205 return;
2206
2207 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2208 if (path_type_desc[i].type != type)
2209 continue;
2210
2211 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2212 if (path_status_desc[j].status != status)
2213 continue;
2214
2215 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2216 path_status_desc[j].desc, path_type_desc[i].desc,
2217 ipr_format_res_path(hostrcb->ioa_cfg,
2218 cfg->res_path, buffer, sizeof(buffer)),
2219 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2220 be32_to_cpu(cfg->wwid[0]),
2221 be32_to_cpu(cfg->wwid[1]));
2222 return;
2223 }
2224 }
2225 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2226 "WWN=%08X%08X\n", cfg->type_status,
2227 ipr_format_res_path(hostrcb->ioa_cfg,
2228 cfg->res_path, buffer, sizeof(buffer)),
2229 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2230 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2231 }
2232
2233 /**
2234 * ipr_log_fabric_error - Log a fabric error.
2235 * @ioa_cfg: ioa config struct
2236 * @hostrcb: hostrcb struct
2237 *
2238 * Return value:
2239 * none
2240 **/
2241 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2242 struct ipr_hostrcb *hostrcb)
2243 {
2244 struct ipr_hostrcb_type_20_error *error;
2245 struct ipr_hostrcb_fabric_desc *fabric;
2246 struct ipr_hostrcb_config_element *cfg;
2247 int i, add_len;
2248
2249 error = &hostrcb->hcam.u.error.u.type_20_error;
2250 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2251 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2252
2253 add_len = be32_to_cpu(hostrcb->hcam.length) -
2254 (offsetof(struct ipr_hostrcb_error, u) +
2255 offsetof(struct ipr_hostrcb_type_20_error, desc));
2256
2257 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2258 ipr_log_fabric_path(hostrcb, fabric);
2259 for_each_fabric_cfg(fabric, cfg)
2260 ipr_log_path_elem(hostrcb, cfg);
2261
2262 add_len -= be16_to_cpu(fabric->length);
2263 fabric = (struct ipr_hostrcb_fabric_desc *)
2264 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2265 }
2266
2267 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2268 }
2269
2270 /**
2271 * ipr_log_sis64_array_error - Log a sis64 array error.
2272 * @ioa_cfg: ioa config struct
2273 * @hostrcb: hostrcb struct
2274 *
2275 * Return value:
2276 * none
2277 **/
2278 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2279 struct ipr_hostrcb *hostrcb)
2280 {
2281 int i, num_entries;
2282 struct ipr_hostrcb_type_24_error *error;
2283 struct ipr_hostrcb64_array_data_entry *array_entry;
2284 char buffer[IPR_MAX_RES_PATH_LENGTH];
2285 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2286
2287 error = &hostrcb->hcam.u.error64.u.type_24_error;
2288
2289 ipr_err_separator;
2290
2291 ipr_err("RAID %s Array Configuration: %s\n",
2292 error->protection_level,
2293 ipr_format_res_path(ioa_cfg, error->last_res_path,
2294 buffer, sizeof(buffer)));
2295
2296 ipr_err_separator;
2297
2298 array_entry = error->array_member;
2299 num_entries = min_t(u32, error->num_entries,
2300 ARRAY_SIZE(error->array_member));
2301
2302 for (i = 0; i < num_entries; i++, array_entry++) {
2303
2304 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2305 continue;
2306
2307 if (error->exposed_mode_adn == i)
2308 ipr_err("Exposed Array Member %d:\n", i);
2309 else
2310 ipr_err("Array Member %d:\n", i);
2311
2313 ipr_log_ext_vpd(&array_entry->vpd);
2314 ipr_err("Current Location: %s\n",
2315 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2316 buffer, sizeof(buffer)));
2317 ipr_err("Expected Location: %s\n",
2318 ipr_format_res_path(ioa_cfg,
2319 array_entry->expected_res_path,
2320 buffer, sizeof(buffer)));
2321
2322 ipr_err_separator;
2323 }
2324 }
2325
2326 /**
2327 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2328 * @ioa_cfg: ioa config struct
2329 * @hostrcb: hostrcb struct
2330 *
2331 * Return value:
2332 * none
2333 **/
2334 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2335 struct ipr_hostrcb *hostrcb)
2336 {
2337 struct ipr_hostrcb_type_30_error *error;
2338 struct ipr_hostrcb64_fabric_desc *fabric;
2339 struct ipr_hostrcb64_config_element *cfg;
2340 int i, add_len;
2341
2342 error = &hostrcb->hcam.u.error64.u.type_30_error;
2343
2344 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2345 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2346
2347 add_len = be32_to_cpu(hostrcb->hcam.length) -
2348 (offsetof(struct ipr_hostrcb64_error, u) +
2349 offsetof(struct ipr_hostrcb_type_30_error, desc));
2350
2351 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2352 ipr_log64_fabric_path(hostrcb, fabric);
2353 for_each_fabric_cfg(fabric, cfg)
2354 ipr_log64_path_elem(hostrcb, cfg);
2355
2356 add_len -= be16_to_cpu(fabric->length);
2357 fabric = (struct ipr_hostrcb64_fabric_desc *)
2358 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2359 }
2360
2361 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2362 }
2363
2364 /**
2365 * ipr_log_generic_error - Log an adapter error.
2366 * @ioa_cfg: ioa config struct
2367 * @hostrcb: hostrcb struct
2368 *
2369 * Return value:
2370 * none
2371 **/
2372 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2373 struct ipr_hostrcb *hostrcb)
2374 {
2375 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2376 be32_to_cpu(hostrcb->hcam.length));
2377 }
2378
2379 /**
2380 * ipr_log_sis64_device_error - Log a sis64 device error.
2381 * @ioa_cfg: ioa config struct
2382 * @hostrcb: hostrcb struct
2383 *
2384 * Return value:
2385 * none
2386 **/
2387 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2388 struct ipr_hostrcb *hostrcb)
2389 {
2390 struct ipr_hostrcb_type_21_error *error;
2391 char buffer[IPR_MAX_RES_PATH_LENGTH];
2392
2393 error = &hostrcb->hcam.u.error64.u.type_21_error;
2394
2395 ipr_err("-----Failing Device Information-----\n");
2396 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2397 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2398 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2399 ipr_err("Device Resource Path: %s\n",
2400 __ipr_format_res_path(error->res_path,
2401 buffer, sizeof(buffer)));
2402 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2403 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2404 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2405 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2406 ipr_err("SCSI Sense Data:\n");
2407 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2408 ipr_err("SCSI Command Descriptor Block: \n");
2409 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2410
2411 ipr_err("Additional IOA Data:\n");
2412 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2413 }
2414
2415 /**
2416 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2417 * @ioasc: IOASC
2418 *
2419 * This function will return the index into the ipr_error_table
2420 * for the specified IOASC. If the IOASC is not in the table,
2421 * 0 will be returned, which points to the entry used for unknown errors.
2422 *
2423 * Return value:
2424 * index into the ipr_error_table
2425 **/
2426 static u32 ipr_get_error(u32 ioasc)
2427 {
2428 int i;
2429
2430 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2431 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2432 return i;
2433
2434 return 0;
2435 }
2436
2437 /**
2438 * ipr_handle_log_data - Log an adapter error.
2439 * @ioa_cfg: ioa config struct
2440 * @hostrcb: hostrcb struct
2441 *
2442 * This function logs an adapter error to the system.
2443 *
2444 * Return value:
2445 * none
2446 **/
2447 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2448 struct ipr_hostrcb *hostrcb)
2449 {
2450 u32 ioasc;
2451 int error_index;
2452 struct ipr_hostrcb_type_21_error *error;
2453
2454 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2455 return;
2456
2457 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2458 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2459
2460 if (ioa_cfg->sis64)
2461 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2462 else
2463 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2464
2465 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2466 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2467 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2468 scsi_report_bus_reset(ioa_cfg->host,
2469 hostrcb->hcam.u.error.fd_res_addr.bus);
2470 }
2471
2472 error_index = ipr_get_error(ioasc);
2473
2474 if (!ipr_error_table[error_index].log_hcam)
2475 return;
2476
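/*
 * Hardware command failures whose sense data carry an illegal request
 * are only logged when a log level above the default is requested.
 */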
2477 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2478 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2479 error = &hostrcb->hcam.u.error64.u.type_21_error;
2480
2481 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2482 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2483 return;
2484 }
2485
2486 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2487
2488 /* Set indication we have logged an error */
2489 ioa_cfg->errors_logged++;
2490
2491 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2492 return;
2493 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2494 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2495
2496 switch (hostrcb->hcam.overlay_id) {
2497 case IPR_HOST_RCB_OVERLAY_ID_2:
2498 ipr_log_cache_error(ioa_cfg, hostrcb);
2499 break;
2500 case IPR_HOST_RCB_OVERLAY_ID_3:
2501 ipr_log_config_error(ioa_cfg, hostrcb);
2502 break;
2503 case IPR_HOST_RCB_OVERLAY_ID_4:
2504 case IPR_HOST_RCB_OVERLAY_ID_6:
2505 ipr_log_array_error(ioa_cfg, hostrcb);
2506 break;
2507 case IPR_HOST_RCB_OVERLAY_ID_7:
2508 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2509 break;
2510 case IPR_HOST_RCB_OVERLAY_ID_12:
2511 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2512 break;
2513 case IPR_HOST_RCB_OVERLAY_ID_13:
2514 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2515 break;
2516 case IPR_HOST_RCB_OVERLAY_ID_14:
2517 case IPR_HOST_RCB_OVERLAY_ID_16:
2518 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2519 break;
2520 case IPR_HOST_RCB_OVERLAY_ID_17:
2521 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2522 break;
2523 case IPR_HOST_RCB_OVERLAY_ID_20:
2524 ipr_log_fabric_error(ioa_cfg, hostrcb);
2525 break;
2526 case IPR_HOST_RCB_OVERLAY_ID_21:
2527 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2528 break;
2529 case IPR_HOST_RCB_OVERLAY_ID_23:
2530 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2531 break;
2532 case IPR_HOST_RCB_OVERLAY_ID_24:
2533 case IPR_HOST_RCB_OVERLAY_ID_26:
2534 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2535 break;
2536 case IPR_HOST_RCB_OVERLAY_ID_30:
2537 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2538 break;
2539 case IPR_HOST_RCB_OVERLAY_ID_1:
2540 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2541 default:
2542 ipr_log_generic_error(ioa_cfg, hostrcb);
2543 break;
2544 }
2545 }
2546
2547 /**
2548 * ipr_process_error - Op done function for an adapter error log.
2549 * @ipr_cmd: ipr command struct
2550 *
2551 * This function is the op done function for an error log host
2552 * controlled asynchronous notification (HCAM) from the adapter. It
2553 * will log the error and send the HCAM back to the adapter.
2554 *
2555 * Return value:
2556 * none
2557 **/
2558 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2559 {
2560 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2561 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2562 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2563 u32 fd_ioasc;
2564
2565 if (ioa_cfg->sis64)
2566 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2567 else
2568 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2569
2570 list_del(&hostrcb->queue);
2571 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2572
2573 if (!ioasc) {
2574 ipr_handle_log_data(ioa_cfg, hostrcb);
2575 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2576 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2577 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2578 dev_err(&ioa_cfg->pdev->dev,
2579 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2580 }
2581
2582 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2583 }
2584
2585 /**
2586 * ipr_timeout - An internally generated op has timed out.
2587 * @ipr_cmd: ipr command struct
2588 *
2589 * This function blocks host requests and initiates an
2590 * adapter reset.
2591 *
2592 * Return value:
2593 * none
2594 **/
2595 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2596 {
2597 unsigned long lock_flags = 0;
2598 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2599
2600 ENTER;
2601 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2602
2603 ioa_cfg->errors_logged++;
2604 dev_err(&ioa_cfg->pdev->dev,
2605 "Adapter being reset due to command timeout.\n");
2606
2607 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2608 ioa_cfg->sdt_state = GET_DUMP;
2609
2610 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2611 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2612
2613 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2614 LEAVE;
2615 }
2616
2617 /**
2618 * ipr_oper_timeout - Adapter timed out transitioning to operational
2619 * @ipr_cmd: ipr command struct
2620 *
2621 * This function blocks host requests and initiates an
2622 * adapter reset.
2623 *
2624 * Return value:
2625 * none
2626 **/
2627 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2628 {
2629 unsigned long lock_flags = 0;
2630 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2631
2632 ENTER;
2633 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2634
2635 ioa_cfg->errors_logged++;
2636 dev_err(&ioa_cfg->pdev->dev,
2637 "Adapter timed out transitioning to operational.\n");
2638
2639 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2640 ioa_cfg->sdt_state = GET_DUMP;
2641
2642 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2643 if (ipr_fastfail)
2644 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2645 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2646 }
2647
2648 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2649 LEAVE;
2650 }
2651
2652 /**
2653 * ipr_find_ses_entry - Find matching SES in SES table
2654 * @res: resource entry struct of SES
2655 *
2656 * Return value:
2657 * pointer to SES table entry / NULL on failure
2658 **/
2659 static const struct ipr_ses_table_entry *
2660 ipr_find_ses_entry(struct ipr_resource_entry *res)
2661 {
2662 int i, j, matches;
2663 struct ipr_std_inq_vpids *vpids;
2664 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2665
2666 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
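/*
 * An 'X' in compare_product_id_byte marks a byte of the product ID that
 * must match; every other position is treated as a wildcard.
 */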
2667 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2668 if (ste->compare_product_id_byte[j] == 'X') {
2669 vpids = &res->std_inq_data.vpids;
2670 if (vpids->product_id[j] == ste->product_id[j])
2671 matches++;
2672 else
2673 break;
2674 } else
2675 matches++;
2676 }
2677
2678 if (matches == IPR_PROD_ID_LEN)
2679 return ste;
2680 }
2681
2682 return NULL;
2683 }
2684
2685 /**
2686 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2687 * @ioa_cfg: ioa config struct
2688 * @bus: SCSI bus
2689 * @bus_width: bus width
2690 *
2691 * Return value:
2692 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2693 * For a 2-byte (wide) SCSI bus, the maximum data transfer rate in
2694 * MB/sec is twice the bus speed in MHz (e.g. a wide-enabled bus
2695 * running at 160 MHz can move up to 320 MB/sec).
2696 **/
2697 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2698 {
2699 struct ipr_resource_entry *res;
2700 const struct ipr_ses_table_entry *ste;
2701 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2702
2703 /* Loop through each config table entry in the config table buffer */
2704 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2705 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2706 continue;
2707
2708 if (bus != res->bus)
2709 continue;
2710
2711 if (!(ste = ipr_find_ses_entry(res)))
2712 continue;
2713
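/*
 * Scale the SES table limit (assumed here to be in MB/sec) to bus speed
 * in units of 100KHz: e.g. a 320 MB/sec limit on a 16-bit wide bus
 * gives (320 * 10) / (16 / 8) = 1600, i.e. 160 MHz.
 */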
2714 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2715 }
2716
2717 return max_xfer_rate;
2718 }
2719
2720 /**
2721 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2722 * @ioa_cfg: ioa config struct
2723 * @max_delay: max delay in micro-seconds to wait
2724 *
2725 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2726 *
2727 * Return value:
2728 * 0 on success / other on failure
2729 **/
2730 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2731 {
2732 volatile u32 pcii_reg;
2733 int delay = 1;
2734
2735 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2736 while (delay < max_delay) {
2737 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2738
2739 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2740 return 0;
2741
2742 /* udelay cannot be used if delay is more than a few milliseconds */
2743 if ((delay / 1000) > MAX_UDELAY_MS)
2744 mdelay(delay / 1000);
2745 else
2746 udelay(delay);
2747
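/* Double the delay on each pass so the busy-wait backs off exponentially. */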
2748 delay += delay;
2749 }
2750 return -EIO;
2751 }
2752
2753 /**
2754 * ipr_get_sis64_dump_data_section - Dump IOA memory
2755 * @ioa_cfg: ioa config struct
2756 * @start_addr: adapter address to dump
2757 * @dest: destination kernel buffer
2758 * @length_in_words: length to dump in 4 byte words
2759 *
2760 * Return value:
2761 * 0 on success
2762 **/
2763 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2764 u32 start_addr,
2765 __be32 *dest, u32 length_in_words)
2766 {
2767 int i;
2768
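/*
 * Fetch each word by writing its adapter address to the dump address
 * register and reading the value back through the dump data register.
 */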
2769 for (i = 0; i < length_in_words; i++) {
2770 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2771 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2772 dest++;
2773 }
2774
2775 return 0;
2776 }
2777
2778 /**
2779 * ipr_get_ldump_data_section - Dump IOA memory
2780 * @ioa_cfg: ioa config struct
2781 * @start_addr: adapter address to dump
2782 * @dest: destination kernel buffer
2783 * @length_in_words: length to dump in 4 byte words
2784 *
2785 * Return value:
2786 * 0 on success / -EIO on failure
2787 **/
2788 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2789 u32 start_addr,
2790 __be32 *dest, u32 length_in_words)
2791 {
2792 volatile u32 temp_pcii_reg;
2793 int i, delay = 0;
2794
2795 if (ioa_cfg->sis64)
2796 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2797 dest, length_in_words);
2798
2799 /* Write IOA interrupt reg starting LDUMP state */
2800 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2801 ioa_cfg->regs.set_uproc_interrupt_reg32);
2802
2803 /* Wait for IO debug acknowledge */
2804 if (ipr_wait_iodbg_ack(ioa_cfg,
2805 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2806 dev_err(&ioa_cfg->pdev->dev,
2807 "IOA dump long data transfer timeout\n");
2808 return -EIO;
2809 }
2810
2811 /* Signal LDUMP interlocked - clear IO debug ack */
2812 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2813 ioa_cfg->regs.clr_interrupt_reg);
2814
2815 /* Write Mailbox with starting address */
2816 writel(start_addr, ioa_cfg->ioa_mailbox);
2817
2818 /* Signal address valid - clear IOA Reset alert */
2819 writel(IPR_UPROCI_RESET_ALERT,
2820 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2821
2822 for (i = 0; i < length_in_words; i++) {
2823 /* Wait for IO debug acknowledge */
2824 if (ipr_wait_iodbg_ack(ioa_cfg,
2825 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2826 dev_err(&ioa_cfg->pdev->dev,
2827 "IOA dump short data transfer timeout\n");
2828 return -EIO;
2829 }
2830
2831 /* Read data from mailbox and increment destination pointer */
2832 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2833 dest++;
2834
2835 /* For all but the last word of data, signal data received */
2836 if (i < (length_in_words - 1)) {
2837 /* Signal dump data received - Clear IO debug Ack */
2838 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2839 ioa_cfg->regs.clr_interrupt_reg);
2840 }
2841 }
2842
2843 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2844 writel(IPR_UPROCI_RESET_ALERT,
2845 ioa_cfg->regs.set_uproc_interrupt_reg32);
2846
2847 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2848 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2849
2850 /* Signal dump data received - Clear IO debug Ack */
2851 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2852 ioa_cfg->regs.clr_interrupt_reg);
2853
2854 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2855 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2856 temp_pcii_reg =
2857 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2858
2859 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2860 return 0;
2861
2862 udelay(10);
2863 delay += 10;
2864 }
2865
2866 return 0;
2867 }
2868
2869 #ifdef CONFIG_SCSI_IPR_DUMP
2870 /**
2871 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2872 * @ioa_cfg: ioa config struct
2873 * @pci_address: adapter address
2874 * @length: length of data to copy
2875 *
2876 * Copy data from PCI adapter to kernel buffer.
2877 * Note: length MUST be a 4 byte multiple
2878 * Return value:
2879 * 0 on success / other on failure
2880 **/
2881 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2882 unsigned long pci_address, u32 length)
2883 {
2884 int bytes_copied = 0;
2885 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2886 __be32 *page;
2887 unsigned long lock_flags = 0;
2888 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2889
2890 if (ioa_cfg->sis64)
2891 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2892 else
2893 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2894
2895 while (bytes_copied < length &&
2896 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2897 if (ioa_dump->page_offset >= PAGE_SIZE ||
2898 ioa_dump->page_offset == 0) {
2899 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2900
2901 if (!page) {
2902 ipr_trace;
2903 return bytes_copied;
2904 }
2905
2906 ioa_dump->page_offset = 0;
2907 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2908 ioa_dump->next_page_index++;
2909 } else
2910 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2911
2912 rem_len = length - bytes_copied;
2913 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2914 cur_len = min(rem_len, rem_page_len);
2915
2916 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2917 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2918 rc = -EIO;
2919 } else {
2920 rc = ipr_get_ldump_data_section(ioa_cfg,
2921 pci_address + bytes_copied,
2922 &page[ioa_dump->page_offset / 4],
2923 (cur_len / sizeof(u32)));
2924 }
2925 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2926
2927 if (!rc) {
2928 ioa_dump->page_offset += cur_len;
2929 bytes_copied += cur_len;
2930 } else {
2931 ipr_trace;
2932 break;
2933 }
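/* Yield the CPU between chunks so the dump copy does not monopolize it. */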
2934 schedule();
2935 }
2936
2937 return bytes_copied;
2938 }
2939
2940 /**
2941 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2942 * @hdr: dump entry header struct
2943 *
2944 * Return value:
2945 * nothing
2946 **/
2947 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2948 {
2949 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2950 hdr->num_elems = 1;
2951 hdr->offset = sizeof(*hdr);
2952 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2953 }
2954
2955 /**
2956 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2957 * @ioa_cfg: ioa config struct
2958 * @driver_dump: driver dump struct
2959 *
2960 * Return value:
2961 * nothing
2962 **/
2963 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2964 struct ipr_driver_dump *driver_dump)
2965 {
2966 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2967
2968 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2969 driver_dump->ioa_type_entry.hdr.len =
2970 sizeof(struct ipr_dump_ioa_type_entry) -
2971 sizeof(struct ipr_dump_entry_header);
2972 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2973 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2974 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2975 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2976 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2977 ucode_vpd->minor_release[1];
2978 driver_dump->hdr.num_entries++;
2979 }
2980
2981 /**
2982 * ipr_dump_version_data - Fill in the driver version in the dump.
2983 * @ioa_cfg: ioa config struct
2984 * @driver_dump: driver dump struct
2985 *
2986 * Return value:
2987 * nothing
2988 **/
2989 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2990 struct ipr_driver_dump *driver_dump)
2991 {
2992 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2993 driver_dump->version_entry.hdr.len =
2994 sizeof(struct ipr_dump_version_entry) -
2995 sizeof(struct ipr_dump_entry_header);
2996 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2997 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2998 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2999 driver_dump->hdr.num_entries++;
3000 }
3001
3002 /**
3003 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3004 * @ioa_cfg: ioa config struct
3005 * @driver_dump: driver dump struct
3006 *
3007 * Return value:
3008 * nothing
3009 **/
3010 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3011 struct ipr_driver_dump *driver_dump)
3012 {
3013 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3014 driver_dump->trace_entry.hdr.len =
3015 sizeof(struct ipr_dump_trace_entry) -
3016 sizeof(struct ipr_dump_entry_header);
3017 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3018 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3019 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3020 driver_dump->hdr.num_entries++;
3021 }
3022
3023 /**
3024 * ipr_dump_location_data - Fill in the IOA location in the dump.
3025 * @ioa_cfg: ioa config struct
3026 * @driver_dump: driver dump struct
3027 *
3028 * Return value:
3029 * nothing
3030 **/
3031 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3032 struct ipr_driver_dump *driver_dump)
3033 {
3034 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3035 driver_dump->location_entry.hdr.len =
3036 sizeof(struct ipr_dump_location_entry) -
3037 sizeof(struct ipr_dump_entry_header);
3038 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3039 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3040 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3041 driver_dump->hdr.num_entries++;
3042 }
3043
3044 /**
3045 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3046 * @ioa_cfg: ioa config struct
3047 * @dump: dump struct
3048 *
3049 * Return value:
3050 * nothing
3051 **/
3052 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3053 {
3054 unsigned long start_addr, sdt_word;
3055 unsigned long lock_flags = 0;
3056 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3057 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3058 u32 num_entries, max_num_entries, start_off, end_off;
3059 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3060 struct ipr_sdt *sdt;
3061 int valid = 1;
3062 int i;
3063
3064 ENTER;
3065
3066 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3067
3068 if (ioa_cfg->sdt_state != READ_DUMP) {
3069 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3070 return;
3071 }
3072
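/*
 * On SIS-64 adapters, drop the host lock and delay IPR_DUMP_DELAY_SECONDS
 * before reading the dump mailbox.
 */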
3073 if (ioa_cfg->sis64) {
3074 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3075 ssleep(IPR_DUMP_DELAY_SECONDS);
3076 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3077 }
3078
3079 start_addr = readl(ioa_cfg->ioa_mailbox);
3080
3081 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3082 dev_err(&ioa_cfg->pdev->dev,
3083 "Invalid dump table format: %lx\n", start_addr);
3084 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3085 return;
3086 }
3087
3088 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3089
3090 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3091
3092 /* Initialize the overall dump header */
3093 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3094 driver_dump->hdr.num_entries = 1;
3095 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3096 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3097 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3098 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3099
3100 ipr_dump_version_data(ioa_cfg, driver_dump);
3101 ipr_dump_location_data(ioa_cfg, driver_dump);
3102 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3103 ipr_dump_trace_data(ioa_cfg, driver_dump);
3104
3105 /* Update dump_header */
3106 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3107
3108 /* IOA Dump entry */
3109 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3110 ioa_dump->hdr.len = 0;
3111 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3112 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3113
3114 /* First entries in sdt are actually a list of dump addresses and
3115 lengths to gather the real dump data. sdt represents the pointer
3116 to the ioa generated dump table. Dump data will be extracted based
3117 on entries in this table */
3118 sdt = &ioa_dump->sdt;
3119
3120 if (ioa_cfg->sis64) {
3121 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3122 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3123 } else {
3124 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3125 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3126 }
3127
3128 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3129 (max_num_entries * sizeof(struct ipr_sdt_entry));
3130 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3131 bytes_to_copy / sizeof(__be32));
3132
3133 /* Smart Dump table is ready to use and the first entry is valid */
3134 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3135 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3136 dev_err(&ioa_cfg->pdev->dev,
3137 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3138 rc, be32_to_cpu(sdt->hdr.state));
3139 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3140 ioa_cfg->sdt_state = DUMP_OBTAINED;
3141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3142 return;
3143 }
3144
3145 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3146
3147 if (num_entries > max_num_entries)
3148 num_entries = max_num_entries;
3149
3150 /* Update dump length to the actual data to be copied */
3151 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3152 if (ioa_cfg->sis64)
3153 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3154 else
3155 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3156
3157 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158
3159 for (i = 0; i < num_entries; i++) {
3160 if (ioa_dump->hdr.len > max_dump_size) {
3161 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3162 break;
3163 }
3164
3165 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3166 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3167 if (ioa_cfg->sis64)
3168 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3169 else {
3170 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3171 end_off = be32_to_cpu(sdt->entry[i].end_token);
3172
3173 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3174 bytes_to_copy = end_off - start_off;
3175 else
3176 valid = 0;
3177 }
3178 if (valid) {
3179 if (bytes_to_copy > max_dump_size) {
3180 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3181 continue;
3182 }
3183
3184 /* Copy data from adapter to driver buffers */
3185 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3186 bytes_to_copy);
3187
3188 ioa_dump->hdr.len += bytes_copied;
3189
3190 if (bytes_copied != bytes_to_copy) {
3191 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3192 break;
3193 }
3194 }
3195 }
3196 }
3197
3198 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3199
3200 /* Update dump_header */
3201 driver_dump->hdr.len += ioa_dump->hdr.len;
3202 wmb();
3203 ioa_cfg->sdt_state = DUMP_OBTAINED;
3204 LEAVE;
3205 }
3206
3207 #else
3208 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3209 #endif
3210
3211 /**
3212 * ipr_release_dump - Free adapter dump memory
3213 * @kref: kref struct
3214 *
3215 * Return value:
3216 * nothing
3217 **/
3218 static void ipr_release_dump(struct kref *kref)
3219 {
3220 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3221 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3222 unsigned long lock_flags = 0;
3223 int i;
3224
3225 ENTER;
3226 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3227 ioa_cfg->dump = NULL;
3228 ioa_cfg->sdt_state = INACTIVE;
3229 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3230
3231 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3232 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3233
3234 vfree(dump->ioa_dump.ioa_data);
3235 kfree(dump);
3236 LEAVE;
3237 }
3238
3239 /**
3240 * ipr_worker_thread - Worker thread
3241 * @work: ioa config struct
3242 *
3243 * Called at task level from a work thread. This function takes care
3244 * of adding and removing devices from the mid-layer as configuration
3245 * changes are detected by the adapter.
3246 *
3247 * Return value:
3248 * nothing
3249 **/
3250 static void ipr_worker_thread(struct work_struct *work)
3251 {
3252 unsigned long lock_flags;
3253 struct ipr_resource_entry *res;
3254 struct scsi_device *sdev;
3255 struct ipr_dump *dump;
3256 struct ipr_ioa_cfg *ioa_cfg =
3257 container_of(work, struct ipr_ioa_cfg, work_q);
3258 u8 bus, target, lun;
3259 int did_work;
3260
3261 ENTER;
3262 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3263
3264 if (ioa_cfg->sdt_state == READ_DUMP) {
3265 dump = ioa_cfg->dump;
3266 if (!dump) {
3267 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3268 return;
3269 }
3270 kref_get(&dump->kref);
3271 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3272 ipr_get_ioa_dump(ioa_cfg, dump);
3273 kref_put(&dump->kref, ipr_release_dump);
3274
3275 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3276 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3277 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3278 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279 return;
3280 }
3281
3282 restart:
3283 do {
3284 did_work = 0;
3285 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
3286 !ioa_cfg->allow_ml_add_del) {
3287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3288 return;
3289 }
3290
3291 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3292 if (res->del_from_ml && res->sdev) {
3293 did_work = 1;
3294 sdev = res->sdev;
3295 if (!scsi_device_get(sdev)) {
3296 if (!res->add_to_ml)
3297 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3298 else
3299 res->del_from_ml = 0;
3300 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3301 scsi_remove_device(sdev);
3302 scsi_device_put(sdev);
3303 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3304 }
3305 break;
3306 }
3307 }
3308 } while (did_work);
3309
3310 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3311 if (res->add_to_ml) {
3312 bus = res->bus;
3313 target = res->target;
3314 lun = res->lun;
3315 res->add_to_ml = 0;
3316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3317 scsi_add_device(ioa_cfg->host, bus, target, lun);
3318 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3319 goto restart;
3320 }
3321 }
3322
3323 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3324 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3325 LEAVE;
3326 }
3327
3328 #ifdef CONFIG_SCSI_IPR_TRACE
3329 /**
3330 * ipr_read_trace - Dump the adapter trace
3331 * @filp: open sysfs file
3332 * @kobj: kobject struct
3333 * @bin_attr: bin_attribute struct
3334 * @buf: buffer
3335 * @off: offset
3336 * @count: buffer size
3337 *
3338 * Return value:
3339 * number of bytes printed to buffer
3340 **/
3341 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3342 struct bin_attribute *bin_attr,
3343 char *buf, loff_t off, size_t count)
3344 {
3345 struct device *dev = container_of(kobj, struct device, kobj);
3346 struct Scsi_Host *shost = class_to_shost(dev);
3347 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3348 unsigned long lock_flags = 0;
3349 ssize_t ret;
3350
3351 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3352 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3353 IPR_TRACE_SIZE);
3354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3355
3356 return ret;
3357 }
3358
3359 static struct bin_attribute ipr_trace_attr = {
3360 .attr = {
3361 .name = "trace",
3362 .mode = S_IRUGO,
3363 },
3364 .size = 0,
3365 .read = ipr_read_trace,
3366 };
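/*
 * Usage sketch (the host number is hypothetical): "trace" is a read-only
 * binary sysfs attribute backed by the IPR_TRACE_SIZE trace buffer, so it
 * can simply be copied out of sysfs from user space, e.g.:
 *
 *	dd if=/sys/class/scsi_host/host0/trace of=/tmp/ipr_trace.bin
 */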
3367 #endif
3368
3369 /**
3370 * ipr_show_fw_version - Show the firmware version
3371 * @dev: class device struct
3372 * @buf: buffer
3373 *
3374 * Return value:
3375 * number of bytes printed to buffer
3376 **/
3377 static ssize_t ipr_show_fw_version(struct device *dev,
3378 struct device_attribute *attr, char *buf)
3379 {
3380 struct Scsi_Host *shost = class_to_shost(dev);
3381 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3382 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3383 unsigned long lock_flags = 0;
3384 int len;
3385
3386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3387 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3388 ucode_vpd->major_release, ucode_vpd->card_type,
3389 ucode_vpd->minor_release[0],
3390 ucode_vpd->minor_release[1]);
3391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3392 return len;
3393 }
3394
3395 static struct device_attribute ipr_fw_version_attr = {
3396 .attr = {
3397 .name = "fw_version",
3398 .mode = S_IRUGO,
3399 },
3400 .show = ipr_show_fw_version,
3401 };
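/*
 * Example (the host number is hypothetical): reading the attribute prints
 * the four microcode VPD bytes formatted above as a single hex word, e.g.:
 *
 *	cat /sys/class/scsi_host/host0/fw_version
 */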
3402
3403 /**
3404 * ipr_show_log_level - Show the adapter's error logging level
3405 * @dev: class device struct
3406 * @buf: buffer
3407 *
3408 * Return value:
3409 * number of bytes printed to buffer
3410 **/
3411 static ssize_t ipr_show_log_level(struct device *dev,
3412 struct device_attribute *attr, char *buf)
3413 {
3414 struct Scsi_Host *shost = class_to_shost(dev);
3415 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3416 unsigned long lock_flags = 0;
3417 int len;
3418
3419 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3420 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3422 return len;
3423 }
3424
3425 /**
3426 * ipr_store_log_level - Change the adapter's error logging level
3427 * @dev: class device struct
3428 * @buf: buffer
3429 *
3430 * Return value:
3431 * number of bytes consumed from buffer
3432 **/
3433 static ssize_t ipr_store_log_level(struct device *dev,
3434 struct device_attribute *attr,
3435 const char *buf, size_t count)
3436 {
3437 struct Scsi_Host *shost = class_to_shost(dev);
3438 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3439 unsigned long lock_flags = 0;
3440
3441 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3442 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3443 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3444 return strlen(buf);
3445 }
3446
3447 static struct device_attribute ipr_log_level_attr = {
3448 .attr = {
3449 .name = "log_level",
3450 .mode = S_IRUGO | S_IWUSR,
3451 },
3452 .show = ipr_show_log_level,
3453 .store = ipr_store_log_level
3454 };
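/*
 * Example (the host number is hypothetical): the error logging level is a
 * small decimal value parsed with simple_strtoul() above, so it can be
 * inspected and changed from user space, e.g.:
 *
 *	cat /sys/class/scsi_host/host0/log_level
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 */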
3455
3456 /**
3457 * ipr_store_diagnostics - IOA Diagnostics interface
3458 * @dev: device struct
3459 * @buf: buffer
3460 * @count: buffer size
3461 *
3462 * This function will reset the adapter and wait a reasonable
3463 * amount of time for any errors that the adapter might log.
3464 *
3465 * Return value:
3466 * count on success / other on failure
3467 **/
3468 static ssize_t ipr_store_diagnostics(struct device *dev,
3469 struct device_attribute *attr,
3470 const char *buf, size_t count)
3471 {
3472 struct Scsi_Host *shost = class_to_shost(dev);
3473 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3474 unsigned long lock_flags = 0;
3475 int rc = count;
3476
3477 if (!capable(CAP_SYS_ADMIN))
3478 return -EACCES;
3479
3480 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3481 while (ioa_cfg->in_reset_reload) {
3482 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3483 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3484 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3485 }
3486
3487 ioa_cfg->errors_logged = 0;
3488 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3489
3490 if (ioa_cfg->in_reset_reload) {
3491 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3492 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3493
3494 /* Wait for a second for any errors to be logged */
3495 msleep(1000);
3496 } else {
3497 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3498 return -EIO;
3499 }
3500
3501 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3502 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3503 rc = -EIO;
3504 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3505
3506 return rc;
3507 }
3508
3509 static struct device_attribute ipr_diagnostics_attr = {
3510 .attr = {
3511 .name = "run_diagnostics",
3512 .mode = S_IWUSR,
3513 },
3514 .store = ipr_store_diagnostics
3515 };
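/*
 * Example (the host number is hypothetical): writing any value clears
 * errors_logged, triggers a normal-shutdown adapter reset and waits for it;
 * the write returns -EIO if the adapter logged errors during the reset:
 *
 *	echo 1 > /sys/class/scsi_host/host0/run_diagnostics
 */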
3516
3517 /**
3518 * ipr_show_adapter_state - Show the adapter's state
3519 * @dev: device struct
3520 * @buf: buffer
3521 *
3522 * Return value:
3523 * number of bytes printed to buffer
3524 **/
3525 static ssize_t ipr_show_adapter_state(struct device *dev,
3526 struct device_attribute *attr, char *buf)
3527 {
3528 struct Scsi_Host *shost = class_to_shost(dev);
3529 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3530 unsigned long lock_flags = 0;
3531 int len;
3532
3533 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3534 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3535 len = snprintf(buf, PAGE_SIZE, "offline\n");
3536 else
3537 len = snprintf(buf, PAGE_SIZE, "online\n");
3538 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3539 return len;
3540 }
3541
3542 /**
3543 * ipr_store_adapter_state - Change adapter state
3544 * @dev: device struct
3545 * @buf: buffer
3546 * @count: buffer size
3547 *
3548 * This function will change the adapter's state.
3549 *
3550 * Return value:
3551 * count on success / other on failure
3552 **/
3553 static ssize_t ipr_store_adapter_state(struct device *dev,
3554 struct device_attribute *attr,
3555 const char *buf, size_t count)
3556 {
3557 struct Scsi_Host *shost = class_to_shost(dev);
3558 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3559 unsigned long lock_flags;
3560 int result = count, i;
3561
3562 if (!capable(CAP_SYS_ADMIN))
3563 return -EACCES;
3564
3565 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3566 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3567 !strncmp(buf, "online", 6)) {
3568 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3569 spin_lock(&ioa_cfg->hrrq[i]._lock);
3570 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3571 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3572 }
3573 wmb();
3574 ioa_cfg->reset_retries = 0;
3575 ioa_cfg->in_ioa_bringdown = 0;
3576 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3577 }
3578 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3579 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3580
3581 return result;
3582 }
3583
3584 static struct device_attribute ipr_ioa_state_attr = {
3585 .attr = {
3586 .name = "online_state",
3587 .mode = S_IRUGO | S_IWUSR,
3588 },
3589 .show = ipr_show_adapter_state,
3590 .store = ipr_store_adapter_state
3591 };
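/*
 * Example (the host number is hypothetical): only the string "online" is
 * acted upon, and only while the adapter is marked dead; it clears
 * ioa_is_dead on each HRR queue and kicks off an adapter reset, e.g.:
 *
 *	cat /sys/class/scsi_host/host0/online_state
 *	echo online > /sys/class/scsi_host/host0/online_state
 */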
3592
3593 /**
3594 * ipr_store_reset_adapter - Reset the adapter
3595 * @dev: device struct
3596 * @buf: buffer
3597 * @count: buffer size
3598 *
3599 * This function will reset the adapter.
3600 *
3601 * Return value:
3602 * count on success / other on failure
3603 **/
3604 static ssize_t ipr_store_reset_adapter(struct device *dev,
3605 struct device_attribute *attr,
3606 const char *buf, size_t count)
3607 {
3608 struct Scsi_Host *shost = class_to_shost(dev);
3609 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3610 unsigned long lock_flags;
3611 int result = count;
3612
3613 if (!capable(CAP_SYS_ADMIN))
3614 return -EACCES;
3615
3616 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3617 if (!ioa_cfg->in_reset_reload)
3618 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3619 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3620 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3621
3622 return result;
3623 }
3624
3625 static struct device_attribute ipr_ioa_reset_attr = {
3626 .attr = {
3627 .name = "reset_host",
3628 .mode = S_IWUSR,
3629 },
3630 .store = ipr_store_reset_adapter
3631 };
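/*
 * Example (the host number is hypothetical): any write initiates a normal
 * shutdown and reset of the adapter, unless one is already in progress, and
 * blocks until reset/reload completes, e.g.:
 *
 *	echo 1 > /sys/class/scsi_host/host0/reset_host
 */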
3632
3633 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3634 /**
3635 * ipr_show_iopoll_weight - Show ipr polling mode
3636 * @dev: class device struct
3637 * @buf: buffer
3638 *
3639 * Return value:
3640 * number of bytes printed to buffer
3641 **/
3642 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3643 struct device_attribute *attr, char *buf)
3644 {
3645 struct Scsi_Host *shost = class_to_shost(dev);
3646 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3647 unsigned long lock_flags = 0;
3648 int len;
3649
3650 spin_lock_irqsave(shost->host_lock, lock_flags);
3651 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3652 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3653
3654 return len;
3655 }
3656
3657 /**
3658 * ipr_store_iopoll_weight - Change the adapter's polling mode
3659 * @dev: class device struct
3660 * @buf: buffer
3661 *
3662 * Return value:
3663 * number of bytes consumed from buffer
3664 **/
3665 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3666 struct device_attribute *attr,
3667 const char *buf, size_t count)
3668 {
3669 struct Scsi_Host *shost = class_to_shost(dev);
3670 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3671 unsigned long user_iopoll_weight;
3672 unsigned long lock_flags = 0;
3673 int i;
3674
3675 if (!ioa_cfg->sis64) {
3676 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3677 return -EINVAL;
3678 }
3679 if (kstrtoul(buf, 10, &user_iopoll_weight))
3680 return -EINVAL;
3681
3682 if (user_iopoll_weight > 256) {
3683 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be 256 or less\n");
3684 return -EINVAL;
3685 }
3686
3687 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3688 dev_info(&ioa_cfg->pdev->dev, "Requested blk-iopoll weight matches the current weight\n");
3689 return strlen(buf);
3690 }
3691
3692 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3693 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3694 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3695 }
3696
3697 spin_lock_irqsave(shost->host_lock, lock_flags);
3698 ioa_cfg->iopoll_weight = user_iopoll_weight;
3699 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3700 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3701 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3702 ioa_cfg->iopoll_weight, ipr_iopoll);
3703 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3704 }
3705 }
3706 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3707
3708 return strlen(buf);
3709 }
3710
3711 static struct device_attribute ipr_iopoll_weight_attr = {
3712 .attr = {
3713 .name = "iopoll_weight",
3714 .mode = S_IRUGO | S_IWUSR,
3715 },
3716 .show = ipr_show_iopoll_weight,
3717 .store = ipr_store_iopoll_weight
3718 };
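/*
 * Example (the host number is hypothetical): the weight must be 256 or
 * less and only takes effect on SIS64 adapters with more than one MSI-X
 * vector; writing 0 leaves blk-iopoll disabled on the secondary HRR
 * queues after any currently enabled instances are torn down, e.g.:
 *
 *	echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */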
3719
3720 /**
3721 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3722 * @buf_len: buffer length
3723 *
3724 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3725 * list to use for microcode download
3726 *
3727 * Return value:
3728 * pointer to sglist / NULL on failure
3729 **/
3730 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3731 {
3732 int sg_size, order, bsize_elem, num_elem, i, j;
3733 struct ipr_sglist *sglist;
3734 struct scatterlist *scatterlist;
3735 struct page *page;
3736
3737 /* Get the minimum size per scatter/gather element */
3738 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3739
3740 /* Get the actual size per element */
3741 order = get_order(sg_size);
3742
3743 /* Determine the actual number of bytes per element */
3744 bsize_elem = PAGE_SIZE * (1 << order);
3745
3746 /* Determine the actual number of sg entries needed */
3747 if (buf_len % bsize_elem)
3748 num_elem = (buf_len / bsize_elem) + 1;
3749 else
3750 num_elem = buf_len / bsize_elem;
3751
3752 /* Allocate a scatter/gather list for the DMA */
3753 sglist = kzalloc(sizeof(struct ipr_sglist) +
3754 (sizeof(struct scatterlist) * (num_elem - 1)),
3755 GFP_KERNEL);
3756
3757 if (sglist == NULL) {
3758 ipr_trace;
3759 return NULL;
3760 }
3761
3762 scatterlist = sglist->scatterlist;
3763 sg_init_table(scatterlist, num_elem);
3764
3765 sglist->order = order;
3766 sglist->num_sg = num_elem;
3767
3768 /* Allocate a bunch of sg elements */
3769 for (i = 0; i < num_elem; i++) {
3770 page = alloc_pages(GFP_KERNEL, order);
3771 if (!page) {
3772 ipr_trace;
3773
3774 /* Free up what we already allocated */
3775 for (j = i - 1; j >= 0; j--)
3776 __free_pages(sg_page(&scatterlist[j]), order);
3777 kfree(sglist);
3778 return NULL;
3779 }
3780
3781 sg_set_page(&scatterlist[i], page, 0, 0);
3782 }
3783
3784 return sglist;
3785 }
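/*
 * Sizing sketch (illustrative numbers, assuming 4K pages and an
 * IPR_MAX_SGLIST of 64): for a 4 MB microcode image,
 *	sg_size    = 4194304 / 63 = 66576 bytes
 *	order      = get_order(66576) = 5 (32 pages)
 *	bsize_elem = 4096 * 32 = 131072 bytes
 *	num_elem   = 4194304 / 131072 = 32 scatter/gather elements
 */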
3786
3787 /**
3788 * ipr_free_ucode_buffer - Frees a microcode download buffer
3789 * @sglist: scatter/gather list pointer
3790 *
3791 * Free a DMA'able ucode download buffer previously allocated with
3792 * ipr_alloc_ucode_buffer
3793 *
3794 * Return value:
3795 * nothing
3796 **/
3797 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3798 {
3799 int i;
3800
3801 for (i = 0; i < sglist->num_sg; i++)
3802 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3803
3804 kfree(sglist);
3805 }
3806
3807 /**
3808 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3809 * @sglist: scatter/gather list pointer
3810 * @buffer: buffer pointer
3811 * @len: buffer length
3812 *
3813 * Copy a microcode image from a user buffer into a buffer allocated by
3814 * ipr_alloc_ucode_buffer
3815 *
3816 * Return value:
3817 * 0 on success / other on failure
3818 **/
3819 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3820 u8 *buffer, u32 len)
3821 {
3822 int bsize_elem, i, result = 0;
3823 struct scatterlist *scatterlist;
3824 void *kaddr;
3825
3826 /* Determine the actual number of bytes per element */
3827 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3828
3829 scatterlist = sglist->scatterlist;
3830
3831 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3832 struct page *page = sg_page(&scatterlist[i]);
3833
3834 kaddr = kmap(page);
3835 memcpy(kaddr, buffer, bsize_elem);
3836 kunmap(page);
3837
3838 scatterlist[i].length = bsize_elem;
3839
3840 if (result != 0) {
3841 ipr_trace;
3842 return result;
3843 }
3844 }
3845
3846 if (len % bsize_elem) {
3847 struct page *page = sg_page(&scatterlist[i]);
3848
3849 kaddr = kmap(page);
3850 memcpy(kaddr, buffer, len % bsize_elem);
3851 kunmap(page);
3852
3853 scatterlist[i].length = len % bsize_elem;
3854 }
3855
3856 sglist->buffer_len = len;
3857 return result;
3858 }
3859
3860 /**
3861 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3862 * @ipr_cmd: ipr command struct
3863 * @sglist: scatter/gather list
3864 *
3865 * Builds a microcode download IOA data list (IOADL).
3866 *
3867 **/
3868 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3869 struct ipr_sglist *sglist)
3870 {
3871 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3872 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3873 struct scatterlist *scatterlist = sglist->scatterlist;
3874 int i;
3875
3876 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3877 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3878 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3879
3880 ioarcb->ioadl_len =
3881 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3882 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3883 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3884 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3885 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3886 }
3887
3888 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3889 }
3890
3891 /**
3892 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3893 * @ipr_cmd: ipr command struct
3894 * @sglist: scatter/gather list
3895 *
3896 * Builds a microcode download IOA data list (IOADL).
3897 *
3898 **/
3899 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3900 struct ipr_sglist *sglist)
3901 {
3902 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3903 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3904 struct scatterlist *scatterlist = sglist->scatterlist;
3905 int i;
3906
3907 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3908 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3909 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3910
3911 ioarcb->ioadl_len =
3912 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3913
3914 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3915 ioadl[i].flags_and_data_len =
3916 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3917 ioadl[i].address =
3918 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3919 }
3920
3921 ioadl[i-1].flags_and_data_len |=
3922 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3923 }
3924
3925 /**
3926 * ipr_update_ioa_ucode - Update IOA's microcode
3927 * @ioa_cfg: ioa config struct
3928 * @sglist: scatter/gather list
3929 *
3930 * Initiate an adapter reset to update the IOA's microcode
3931 *
3932 * Return value:
3933 * 0 on success / -EIO on failure
3934 **/
3935 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3936 struct ipr_sglist *sglist)
3937 {
3938 unsigned long lock_flags;
3939
3940 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3941 while (ioa_cfg->in_reset_reload) {
3942 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3943 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3944 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3945 }
3946
3947 if (ioa_cfg->ucode_sglist) {
3948 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3949 dev_err(&ioa_cfg->pdev->dev,
3950 "Microcode download already in progress\n");
3951 return -EIO;
3952 }
3953
3954 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3955 sglist->num_sg, DMA_TO_DEVICE);
3956
3957 if (!sglist->num_dma_sg) {
3958 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3959 dev_err(&ioa_cfg->pdev->dev,
3960 "Failed to map microcode download buffer!\n");
3961 return -EIO;
3962 }
3963
3964 ioa_cfg->ucode_sglist = sglist;
3965 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3966 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3967 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3968
3969 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3970 ioa_cfg->ucode_sglist = NULL;
3971 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3972 return 0;
3973 }
3974
3975 /**
3976 * ipr_store_update_fw - Update the firmware on the adapter
3977 * @dev: device struct
3978 * @buf: buffer
3979 * @count: buffer size
3980 *
3981 * This function will update the firmware on the adapter.
3982 *
3983 * Return value:
3984 * count on success / other on failure
3985 **/
3986 static ssize_t ipr_store_update_fw(struct device *dev,
3987 struct device_attribute *attr,
3988 const char *buf, size_t count)
3989 {
3990 struct Scsi_Host *shost = class_to_shost(dev);
3991 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3992 struct ipr_ucode_image_header *image_hdr;
3993 const struct firmware *fw_entry;
3994 struct ipr_sglist *sglist;
3995 char fname[100];
3996 char *src;
3997 int len, result, dnld_size;
3998
3999 if (!capable(CAP_SYS_ADMIN))
4000 return -EACCES;
4001
4002 len = snprintf(fname, 99, "%s", buf);
4003 fname[len-1] = '\0';
4004
4005 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4006 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4007 return -EIO;
4008 }
4009
4010 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4011
4012 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4013 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4014 sglist = ipr_alloc_ucode_buffer(dnld_size);
4015
4016 if (!sglist) {
4017 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4018 release_firmware(fw_entry);
4019 return -ENOMEM;
4020 }
4021
4022 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4023
4024 if (result) {
4025 dev_err(&ioa_cfg->pdev->dev,
4026 "Microcode buffer copy to DMA buffer failed\n");
4027 goto out;
4028 }
4029
4030 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4031
4032 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4033
4034 if (!result)
4035 result = count;
4036 out:
4037 ipr_free_ucode_buffer(sglist);
4038 release_firmware(fw_entry);
4039 return result;
4040 }
4041
4042 static struct device_attribute ipr_update_fw_attr = {
4043 .attr = {
4044 .name = "update_fw",
4045 .mode = S_IWUSR,
4046 },
4047 .store = ipr_store_update_fw
4048 };
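/*
 * Example (the file name and host number are hypothetical): the written
 * string is handed to request_firmware(), so the image must be visible to
 * the firmware loader (typically under /lib/firmware), e.g.:
 *
 *	cp ibm-ipr-ucode.img /lib/firmware/
 *	echo ibm-ipr-ucode.img > /sys/class/scsi_host/host0/update_fw
 */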
4049
4050 /**
4051 * ipr_show_fw_type - Show the adapter's firmware type.
4052 * @dev: class device struct
4053 * @buf: buffer
4054 *
4055 * Return value:
4056 * number of bytes printed to buffer
4057 **/
4058 static ssize_t ipr_show_fw_type(struct device *dev,
4059 struct device_attribute *attr, char *buf)
4060 {
4061 struct Scsi_Host *shost = class_to_shost(dev);
4062 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4063 unsigned long lock_flags = 0;
4064 int len;
4065
4066 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4067 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4068 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4069 return len;
4070 }
4071
4072 static struct device_attribute ipr_ioa_fw_type_attr = {
4073 .attr = {
4074 .name = "fw_type",
4075 .mode = S_IRUGO,
4076 },
4077 .show = ipr_show_fw_type
4078 };
4079
4080 static struct device_attribute *ipr_ioa_attrs[] = {
4081 &ipr_fw_version_attr,
4082 &ipr_log_level_attr,
4083 &ipr_diagnostics_attr,
4084 &ipr_ioa_state_attr,
4085 &ipr_ioa_reset_attr,
4086 &ipr_update_fw_attr,
4087 &ipr_ioa_fw_type_attr,
4088 &ipr_iopoll_weight_attr,
4089 NULL,
4090 };
4091
4092 #ifdef CONFIG_SCSI_IPR_DUMP
4093 /**
4094 * ipr_read_dump - Dump the adapter
4095 * @filp: open sysfs file
4096 * @kobj: kobject struct
4097 * @bin_attr: bin_attribute struct
4098 * @buf: buffer
4099 * @off: offset
4100 * @count: buffer size
4101 *
4102 * Return value:
4103 * number of bytes printed to buffer
4104 **/
4105 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4106 struct bin_attribute *bin_attr,
4107 char *buf, loff_t off, size_t count)
4108 {
4109 struct device *cdev = container_of(kobj, struct device, kobj);
4110 struct Scsi_Host *shost = class_to_shost(cdev);
4111 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4112 struct ipr_dump *dump;
4113 unsigned long lock_flags = 0;
4114 char *src;
4115 int len, sdt_end;
4116 size_t rc = count;
4117
4118 if (!capable(CAP_SYS_ADMIN))
4119 return -EACCES;
4120
4121 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4122 dump = ioa_cfg->dump;
4123
4124 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4125 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4126 return 0;
4127 }
4128 kref_get(&dump->kref);
4129 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4130
4131 if (off > dump->driver_dump.hdr.len) {
4132 kref_put(&dump->kref, ipr_release_dump);
4133 return 0;
4134 }
4135
4136 if (off + count > dump->driver_dump.hdr.len) {
4137 count = dump->driver_dump.hdr.len - off;
4138 rc = count;
4139 }
4140
4141 if (count && off < sizeof(dump->driver_dump)) {
4142 if (off + count > sizeof(dump->driver_dump))
4143 len = sizeof(dump->driver_dump) - off;
4144 else
4145 len = count;
4146 src = (u8 *)&dump->driver_dump + off;
4147 memcpy(buf, src, len);
4148 buf += len;
4149 off += len;
4150 count -= len;
4151 }
4152
4153 off -= sizeof(dump->driver_dump);
4154
4155 if (ioa_cfg->sis64)
4156 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4157 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4158 sizeof(struct ipr_sdt_entry));
4159 else
4160 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4161 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4162
4163 if (count && off < sdt_end) {
4164 if (off + count > sdt_end)
4165 len = sdt_end - off;
4166 else
4167 len = count;
4168 src = (u8 *)&dump->ioa_dump + off;
4169 memcpy(buf, src, len);
4170 buf += len;
4171 off += len;
4172 count -= len;
4173 }
4174
4175 off -= sdt_end;
4176
4177 while (count) {
4178 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4179 len = PAGE_ALIGN(off) - off;
4180 else
4181 len = count;
4182 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4183 src += off & ~PAGE_MASK;
4184 memcpy(buf, src, len);
4185 buf += len;
4186 off += len;
4187 count -= len;
4188 }
4189
4190 kref_put(&dump->kref, ipr_release_dump);
4191 return rc;
4192 }
4193
4194 /**
4195 * ipr_alloc_dump - Prepare for adapter dump
4196 * @ioa_cfg: ioa config struct
4197 *
4198 * Return value:
4199 * 0 on success / other on failure
4200 **/
4201 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4202 {
4203 struct ipr_dump *dump;
4204 __be32 **ioa_data;
4205 unsigned long lock_flags = 0;
4206
4207 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4208
4209 if (!dump) {
4210 ipr_err("Dump memory allocation failed\n");
4211 return -ENOMEM;
4212 }
4213
4214 if (ioa_cfg->sis64)
4215 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4216 else
4217 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4218
4219 if (!ioa_data) {
4220 ipr_err("Dump memory allocation failed\n");
4221 kfree(dump);
4222 return -ENOMEM;
4223 }
4224
4225 dump->ioa_dump.ioa_data = ioa_data;
4226
4227 kref_init(&dump->kref);
4228 dump->ioa_cfg = ioa_cfg;
4229
4230 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4231
4232 if (INACTIVE != ioa_cfg->sdt_state) {
4233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4234 vfree(dump->ioa_dump.ioa_data);
4235 kfree(dump);
4236 return 0;
4237 }
4238
4239 ioa_cfg->dump = dump;
4240 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4241 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4242 ioa_cfg->dump_taken = 1;
4243 schedule_work(&ioa_cfg->work_q);
4244 }
4245 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4246
4247 return 0;
4248 }
4249
4250 /**
4251 * ipr_free_dump - Free adapter dump memory
4252 * @ioa_cfg: ioa config struct
4253 *
4254 * Return value:
4255 * 0 on success / other on failure
4256 **/
4257 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4258 {
4259 struct ipr_dump *dump;
4260 unsigned long lock_flags = 0;
4261
4262 ENTER;
4263
4264 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4265 dump = ioa_cfg->dump;
4266 if (!dump) {
4267 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4268 return 0;
4269 }
4270
4271 ioa_cfg->dump = NULL;
4272 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4273
4274 kref_put(&dump->kref, ipr_release_dump);
4275
4276 LEAVE;
4277 return 0;
4278 }
4279
4280 /**
4281 * ipr_write_dump - Setup dump state of adapter
4282 * @filp: open sysfs file
4283 * @kobj: kobject struct
4284 * @bin_attr: bin_attribute struct
4285 * @buf: buffer
4286 * @off: offset
4287 * @count: buffer size
4288 *
4289 * Return value:
4290 * count on success / other on failure
4291 **/
4292 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4293 struct bin_attribute *bin_attr,
4294 char *buf, loff_t off, size_t count)
4295 {
4296 struct device *cdev = container_of(kobj, struct device, kobj);
4297 struct Scsi_Host *shost = class_to_shost(cdev);
4298 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4299 int rc;
4300
4301 if (!capable(CAP_SYS_ADMIN))
4302 return -EACCES;
4303
4304 if (buf[0] == '1')
4305 rc = ipr_alloc_dump(ioa_cfg);
4306 else if (buf[0] == '0')
4307 rc = ipr_free_dump(ioa_cfg);
4308 else
4309 return -EINVAL;
4310
4311 if (rc)
4312 return rc;
4313 else
4314 return count;
4315 }
4316
4317 static struct bin_attribute ipr_dump_attr = {
4318 .attr = {
4319 .name = "dump",
4320 .mode = S_IRUSR | S_IWUSR,
4321 },
4322 .size = 0,
4323 .read = ipr_read_dump,
4324 .write = ipr_write_dump
4325 };
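/*
 * Usage sketch (the host number is hypothetical): writing '1' allocates
 * dump memory and arms the dump, writing '0' releases it, and reads return
 * data only once sdt_state has reached DUMP_OBTAINED, e.g.:
 *
 *	echo 1 > /sys/class/scsi_host/host0/dump
 *	(wait for the adapter to fail and the dump to be captured)
 *	dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr_dump.bin
 *	echo 0 > /sys/class/scsi_host/host0/dump
 */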
4326 #else
4327 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4328 #endif
4329
4330 /**
4331 * ipr_change_queue_depth - Change the device's queue depth
4332 * @sdev: scsi device struct
4333 * @qdepth: depth to set
4334 * @reason: calling context
4335 *
4336 * Return value:
4337 * actual depth set
4338 **/
4339 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4340 int reason)
4341 {
4342 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4343 struct ipr_resource_entry *res;
4344 unsigned long lock_flags = 0;
4345
4346 if (reason != SCSI_QDEPTH_DEFAULT)
4347 return -EOPNOTSUPP;
4348
4349 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4350 res = (struct ipr_resource_entry *)sdev->hostdata;
4351
4352 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4353 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4355
4356 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4357 return sdev->queue_depth;
4358 }
4359
4360 /**
4361 * ipr_change_queue_type - Change the device's queue type
4362 * @sdev: scsi device struct
4363 * @tag_type: type of tags to use
4364 *
4365 * Return value:
4366 * actual queue type set
4367 **/
4368 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4369 {
4370 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4371 struct ipr_resource_entry *res;
4372 unsigned long lock_flags = 0;
4373
4374 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4375 res = (struct ipr_resource_entry *)sdev->hostdata;
4376
4377 if (res) {
4378 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4379 /*
4380 * We don't bother quiescing the device here since the
4381 * adapter firmware does it for us.
4382 */
4383 scsi_set_tag_type(sdev, tag_type);
4384
4385 if (tag_type)
4386 scsi_activate_tcq(sdev, sdev->queue_depth);
4387 else
4388 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4389 } else
4390 tag_type = 0;
4391 } else
4392 tag_type = 0;
4393
4394 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4395 return tag_type;
4396 }
4397
4398 /**
4399 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4400 * @dev: device struct
4401 * @attr: device attribute structure
4402 * @buf: buffer
4403 *
4404 * Return value:
4405 * number of bytes printed to buffer
4406 **/
4407 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4408 {
4409 struct scsi_device *sdev = to_scsi_device(dev);
4410 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4411 struct ipr_resource_entry *res;
4412 unsigned long lock_flags = 0;
4413 ssize_t len = -ENXIO;
4414
4415 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4416 res = (struct ipr_resource_entry *)sdev->hostdata;
4417 if (res)
4418 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4419 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4420 return len;
4421 }
4422
4423 static struct device_attribute ipr_adapter_handle_attr = {
4424 .attr = {
4425 .name = "adapter_handle",
4426 .mode = S_IRUSR,
4427 },
4428 .show = ipr_show_adapter_handle
4429 };
4430
4431 /**
4432 * ipr_show_resource_path - Show the resource path or the resource address for
4433 * this device.
4434 * @dev: device struct
4435 * @attr: device attribute structure
4436 * @buf: buffer
4437 *
4438 * Return value:
4439 * number of bytes printed to buffer
4440 **/
4441 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4442 {
4443 struct scsi_device *sdev = to_scsi_device(dev);
4444 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4445 struct ipr_resource_entry *res;
4446 unsigned long lock_flags = 0;
4447 ssize_t len = -ENXIO;
4448 char buffer[IPR_MAX_RES_PATH_LENGTH];
4449
4450 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4451 res = (struct ipr_resource_entry *)sdev->hostdata;
4452 if (res && ioa_cfg->sis64)
4453 len = snprintf(buf, PAGE_SIZE, "%s\n",
4454 __ipr_format_res_path(res->res_path, buffer,
4455 sizeof(buffer)));
4456 else if (res)
4457 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4458 res->bus, res->target, res->lun);
4459
4460 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4461 return len;
4462 }
4463
4464 static struct device_attribute ipr_resource_path_attr = {
4465 .attr = {
4466 .name = "resource_path",
4467 .mode = S_IRUGO,
4468 },
4469 .show = ipr_show_resource_path
4470 };
4471
4472 /**
4473 * ipr_show_device_id - Show the device_id for this device.
4474 * @dev: device struct
4475 * @attr: device attribute structure
4476 * @buf: buffer
4477 *
4478 * Return value:
4479 * number of bytes printed to buffer
4480 **/
4481 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4482 {
4483 struct scsi_device *sdev = to_scsi_device(dev);
4484 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4485 struct ipr_resource_entry *res;
4486 unsigned long lock_flags = 0;
4487 ssize_t len = -ENXIO;
4488
4489 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4490 res = (struct ipr_resource_entry *)sdev->hostdata;
4491 if (res && ioa_cfg->sis64)
4492 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4493 else if (res)
4494 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4495
4496 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4497 return len;
4498 }
4499
4500 static struct device_attribute ipr_device_id_attr = {
4501 .attr = {
4502 .name = "device_id",
4503 .mode = S_IRUGO,
4504 },
4505 .show = ipr_show_device_id
4506 };
4507
4508 /**
4509 * ipr_show_resource_type - Show the resource type for this device.
4510 * @dev: device struct
4511 * @attr: device attribute structure
4512 * @buf: buffer
4513 *
4514 * Return value:
4515 * number of bytes printed to buffer
4516 **/
4517 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4518 {
4519 struct scsi_device *sdev = to_scsi_device(dev);
4520 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4521 struct ipr_resource_entry *res;
4522 unsigned long lock_flags = 0;
4523 ssize_t len = -ENXIO;
4524
4525 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4526 res = (struct ipr_resource_entry *)sdev->hostdata;
4527
4528 if (res)
4529 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4530
4531 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4532 return len;
4533 }
4534
4535 static struct device_attribute ipr_resource_type_attr = {
4536 .attr = {
4537 .name = "resource_type",
4538 .mode = S_IRUGO,
4539 },
4540 .show = ipr_show_resource_type
4541 };
4542
4543 static struct device_attribute *ipr_dev_attrs[] = {
4544 &ipr_adapter_handle_attr,
4545 &ipr_resource_path_attr,
4546 &ipr_device_id_attr,
4547 &ipr_resource_type_attr,
4548 NULL,
4549 };
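/*
 * Example (the SCSI address is hypothetical): these per-device attributes
 * are published through the SCSI midlayer's sdev_attrs and appear in each
 * attached device's sysfs directory, e.g.:
 *
 *	cat /sys/bus/scsi/devices/0:0:1:0/resource_path
 *	cat /sys/bus/scsi/devices/0:0:1:0/device_id
 */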
4550
4551 /**
4552 * ipr_biosparam - Return the HSC mapping
4553 * @sdev: scsi device struct
4554 * @block_device: block device pointer
4555 * @capacity: capacity of the device
4556 * @parm: Array containing returned HSC values.
4557 *
4558 * This function generates the HSC parms that fdisk uses.
4559 * We want to make sure we return something that places partitions
4560 * on 4k boundaries for best performance with the IOA.
4561 *
4562 * Return value:
4563 * 0 on success
4564 **/
4565 static int ipr_biosparam(struct scsi_device *sdev,
4566 struct block_device *block_device,
4567 sector_t capacity, int *parm)
4568 {
4569 int heads, sectors;
4570 sector_t cylinders;
4571
4572 heads = 128;
4573 sectors = 32;
4574
4575 cylinders = capacity;
4576 sector_div(cylinders, (128 * 32));
4577
4578 /* return result */
4579 parm[0] = heads;
4580 parm[1] = sectors;
4581 parm[2] = cylinders;
4582
4583 return 0;
4584 }
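/*
 * Worked example (illustrative capacity): with the fixed 128-head,
 * 32-sector geometry, each cylinder spans 128 * 32 = 4096 sectors
 * (2 MB with 512-byte sectors), so a 100 GB disk of 195312500 sectors
 * reports 195312500 / 4096 = 47683 cylinders, keeping cylinder-aligned
 * partitions on 4k boundaries.
 */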
4585
4586 /**
4587 * ipr_find_starget - Find target based on bus/target.
4588 * @starget: scsi target struct
4589 *
4590 * Return value:
4591 * resource entry pointer if found / NULL if not found
4592 **/
4593 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4594 {
4595 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4596 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4597 struct ipr_resource_entry *res;
4598
4599 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4600 if ((res->bus == starget->channel) &&
4601 (res->target == starget->id)) {
4602 return res;
4603 }
4604 }
4605
4606 return NULL;
4607 }
4608
4609 static struct ata_port_info sata_port_info;
4610
4611 /**
4612 * ipr_target_alloc - Prepare for commands to a SCSI target
4613 * @starget: scsi target struct
4614 *
4615 * If the device is a SATA device, this function allocates an
4616 * ATA port with libata, else it does nothing.
4617 *
4618 * Return value:
4619 * 0 on success / non-0 on failure
4620 **/
4621 static int ipr_target_alloc(struct scsi_target *starget)
4622 {
4623 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4624 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4625 struct ipr_sata_port *sata_port;
4626 struct ata_port *ap;
4627 struct ipr_resource_entry *res;
4628 unsigned long lock_flags;
4629
4630 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4631 res = ipr_find_starget(starget);
4632 starget->hostdata = NULL;
4633
4634 if (res && ipr_is_gata(res)) {
4635 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4636 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4637 if (!sata_port)
4638 return -ENOMEM;
4639
4640 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4641 if (ap) {
4642 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4643 sata_port->ioa_cfg = ioa_cfg;
4644 sata_port->ap = ap;
4645 sata_port->res = res;
4646
4647 res->sata_port = sata_port;
4648 ap->private_data = sata_port;
4649 starget->hostdata = sata_port;
4650 } else {
4651 kfree(sata_port);
4652 return -ENOMEM;
4653 }
4654 }
4655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4656
4657 return 0;
4658 }
4659
4660 /**
4661 * ipr_target_destroy - Destroy a SCSI target
4662 * @starget: scsi target struct
4663 *
4664 * If the device was a SATA device, this function frees the libata
4665 * ATA port, else it does nothing.
4666 *
4667 **/
4668 static void ipr_target_destroy(struct scsi_target *starget)
4669 {
4670 struct ipr_sata_port *sata_port = starget->hostdata;
4671 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4672 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4673
4674 if (ioa_cfg->sis64) {
4675 if (!ipr_find_starget(starget)) {
4676 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4677 clear_bit(starget->id, ioa_cfg->array_ids);
4678 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4679 clear_bit(starget->id, ioa_cfg->vset_ids);
4680 else if (starget->channel == 0)
4681 clear_bit(starget->id, ioa_cfg->target_ids);
4682 }
4683 }
4684
4685 if (sata_port) {
4686 starget->hostdata = NULL;
4687 ata_sas_port_destroy(sata_port->ap);
4688 kfree(sata_port);
4689 }
4690 }
4691
4692 /**
4693 * ipr_find_sdev - Find device based on bus/target/lun.
4694 * @sdev: scsi device struct
4695 *
4696 * Return value:
4697 * resource entry pointer if found / NULL if not found
4698 **/
4699 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4700 {
4701 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4702 struct ipr_resource_entry *res;
4703
4704 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4705 if ((res->bus == sdev->channel) &&
4706 (res->target == sdev->id) &&
4707 (res->lun == sdev->lun))
4708 return res;
4709 }
4710
4711 return NULL;
4712 }
4713
4714 /**
4715 * ipr_slave_destroy - Unconfigure a SCSI device
4716 * @sdev: scsi device struct
4717 *
4718 * Return value:
4719 * nothing
4720 **/
4721 static void ipr_slave_destroy(struct scsi_device *sdev)
4722 {
4723 struct ipr_resource_entry *res;
4724 struct ipr_ioa_cfg *ioa_cfg;
4725 unsigned long lock_flags = 0;
4726
4727 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4728
4729 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4730 res = (struct ipr_resource_entry *) sdev->hostdata;
4731 if (res) {
4732 if (res->sata_port)
4733 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4734 sdev->hostdata = NULL;
4735 res->sdev = NULL;
4736 res->sata_port = NULL;
4737 }
4738 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4739 }
4740
4741 /**
4742 * ipr_slave_configure - Configure a SCSI device
4743 * @sdev: scsi device struct
4744 *
4745 * This function configures the specified scsi device.
4746 *
4747 * Return value:
4748 * 0 on success
4749 **/
4750 static int ipr_slave_configure(struct scsi_device *sdev)
4751 {
4752 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4753 struct ipr_resource_entry *res;
4754 struct ata_port *ap = NULL;
4755 unsigned long lock_flags = 0;
4756 char buffer[IPR_MAX_RES_PATH_LENGTH];
4757
4758 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4759 res = sdev->hostdata;
4760 if (res) {
4761 if (ipr_is_af_dasd_device(res))
4762 sdev->type = TYPE_RAID;
4763 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4764 sdev->scsi_level = 4;
4765 sdev->no_uld_attach = 1;
4766 }
4767 if (ipr_is_vset_device(res)) {
4768 blk_queue_rq_timeout(sdev->request_queue,
4769 IPR_VSET_RW_TIMEOUT);
4770 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4771 }
4772 if (ipr_is_gata(res) && res->sata_port)
4773 ap = res->sata_port->ap;
4774 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4775
4776 if (ap) {
4777 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4778 ata_sas_slave_configure(sdev, ap);
4779 } else
4780 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4781 if (ioa_cfg->sis64)
4782 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4783 ipr_format_res_path(ioa_cfg,
4784 res->res_path, buffer, sizeof(buffer)));
4785 return 0;
4786 }
4787 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4788 return 0;
4789 }
4790
4791 /**
4792 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4793 * @sdev: scsi device struct
4794 *
4795 * This function initializes an ATA port so that future commands
4796 * sent through queuecommand will work.
4797 *
4798 * Return value:
4799 * 0 on success
4800 **/
4801 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4802 {
4803 struct ipr_sata_port *sata_port = NULL;
4804 int rc = -ENXIO;
4805
4806 ENTER;
4807 if (sdev->sdev_target)
4808 sata_port = sdev->sdev_target->hostdata;
4809 if (sata_port) {
4810 rc = ata_sas_port_init(sata_port->ap);
4811 if (rc == 0)
4812 rc = ata_sas_sync_probe(sata_port->ap);
4813 }
4814
4815 if (rc)
4816 ipr_slave_destroy(sdev);
4817
4818 LEAVE;
4819 return rc;
4820 }
4821
4822 /**
4823 * ipr_slave_alloc - Prepare for commands to a device.
4824 * @sdev: scsi device struct
4825 *
4826 * This function saves a pointer to the resource entry
4827 * in the scsi device struct if the device exists. We
4828 * can then use this pointer in ipr_queuecommand when
4829 * handling new commands.
4830 *
4831 * Return value:
4832 * 0 on success / -ENXIO if device does not exist
4833 **/
4834 static int ipr_slave_alloc(struct scsi_device *sdev)
4835 {
4836 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4837 struct ipr_resource_entry *res;
4838 unsigned long lock_flags;
4839 int rc = -ENXIO;
4840
4841 sdev->hostdata = NULL;
4842
4843 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4844
4845 res = ipr_find_sdev(sdev);
4846 if (res) {
4847 res->sdev = sdev;
4848 res->add_to_ml = 0;
4849 res->in_erp = 0;
4850 sdev->hostdata = res;
4851 if (!ipr_is_naca_model(res))
4852 res->needs_sync_complete = 1;
4853 rc = 0;
4854 if (ipr_is_gata(res)) {
4855 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4856 return ipr_ata_slave_alloc(sdev);
4857 }
4858 }
4859
4860 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4861
4862 return rc;
4863 }
4864
4865 /**
4866 * ipr_match_lun - Match function for specified LUN
4867 * @ipr_cmd: ipr command struct
4868 * @device: device to match (sdev)
4869 *
4870 * Returns:
4871 * 1 if command matches sdev / 0 if command does not match sdev
4872 **/
4873 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4874 {
4875 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4876 return 1;
4877 return 0;
4878 }
4879
4880 /**
4881 * ipr_wait_for_ops - Wait for matching commands to complete
4882 * @ioa_cfg: ioa config struct
4883 * @device: device to match (sdev)
4884 * @match: match function to use
4885 *
4886 * Returns:
4887 * SUCCESS / FAILED
4888 **/
4889 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4890 int (*match)(struct ipr_cmnd *, void *))
4891 {
4892 struct ipr_cmnd *ipr_cmd;
4893 int wait;
4894 unsigned long flags;
4895 struct ipr_hrr_queue *hrrq;
4896 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4897 DECLARE_COMPLETION_ONSTACK(comp);
4898
4899 ENTER;
4900 do {
4901 wait = 0;
4902
4903 for_each_hrrq(hrrq, ioa_cfg) {
4904 spin_lock_irqsave(hrrq->lock, flags);
4905 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4906 if (match(ipr_cmd, device)) {
4907 ipr_cmd->eh_comp = ∁
4908 wait++;
4909 }
4910 }
4911 spin_unlock_irqrestore(hrrq->lock, flags);
4912 }
4913
4914 if (wait) {
4915 timeout = wait_for_completion_timeout(&comp, timeout);
4916
4917 if (!timeout) {
4918 wait = 0;
4919
4920 for_each_hrrq(hrrq, ioa_cfg) {
4921 spin_lock_irqsave(hrrq->lock, flags);
4922 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4923 if (match(ipr_cmd, device)) {
4924 ipr_cmd->eh_comp = NULL;
4925 wait++;
4926 }
4927 }
4928 spin_unlock_irqrestore(hrrq->lock, flags);
4929 }
4930
4931 if (wait)
4932 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4933 LEAVE;
4934 return wait ? FAILED : SUCCESS;
4935 }
4936 }
4937 } while (wait);
4938
4939 LEAVE;
4940 return SUCCESS;
4941 }
4942
4943 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4944 {
4945 struct ipr_ioa_cfg *ioa_cfg;
4946 unsigned long lock_flags = 0;
4947 int rc = SUCCESS;
4948
4949 ENTER;
4950 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4951 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4952
4953 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4954 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4955 dev_err(&ioa_cfg->pdev->dev,
4956 "Adapter being reset as a result of error recovery.\n");
4957
4958 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4959 ioa_cfg->sdt_state = GET_DUMP;
4960 }
4961
4962 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4963 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4964 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4965
4966 /* If we got hit with a host reset while we were already resetting
4967 the adapter for some reason and that reset failed, fail this one too. */
4968 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4969 ipr_trace;
4970 rc = FAILED;
4971 }
4972
4973 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4974 LEAVE;
4975 return rc;
4976 }
4977
4978 /**
4979 * ipr_device_reset - Reset the device
4980 * @ioa_cfg: ioa config struct
4981 * @res: resource entry struct
4982 *
4983 * This function issues a device reset to the affected device.
4984 * If the device is a SCSI device, a LUN reset will be sent
4985 * to the device first. If that does not work, a target reset
4986 * will be sent. If the device is a SATA device, a PHY reset will
4987 * be sent.
4988 *
4989 * Return value:
4990 * 0 on success / non-zero on failure
4991 **/
4992 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4993 struct ipr_resource_entry *res)
4994 {
4995 struct ipr_cmnd *ipr_cmd;
4996 struct ipr_ioarcb *ioarcb;
4997 struct ipr_cmd_pkt *cmd_pkt;
4998 struct ipr_ioarcb_ata_regs *regs;
4999 u32 ioasc;
5000
5001 ENTER;
5002 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5003 ioarcb = &ipr_cmd->ioarcb;
5004 cmd_pkt = &ioarcb->cmd_pkt;
5005
5006 if (ipr_cmd->ioa_cfg->sis64) {
5007 regs = &ipr_cmd->i.ata_ioadl.regs;
5008 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5009 } else
5010 regs = &ioarcb->u.add_data.u.regs;
5011
5012 ioarcb->res_handle = res->res_handle;
5013 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5014 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5015 if (ipr_is_gata(res)) {
5016 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5017 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5018 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5019 }
5020
5021 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5022 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5023 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5024 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5025 if (ipr_cmd->ioa_cfg->sis64)
5026 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5027 sizeof(struct ipr_ioasa_gata));
5028 else
5029 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5030 sizeof(struct ipr_ioasa_gata));
5031 }
5032
5033 LEAVE;
5034 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5035 }
5036
5037 /**
5038 * ipr_sata_reset - Reset the SATA port
5039 * @link: SATA link to reset
5040 * @classes: class of the attached device
5041 *
5042 * This function issues a SATA phy reset to the affected ATA link.
5043 *
5044 * Return value:
5045 * 0 on success / non-zero on failure
5046 **/
5047 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5048 unsigned long deadline)
5049 {
5050 struct ipr_sata_port *sata_port = link->ap->private_data;
5051 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5052 struct ipr_resource_entry *res;
5053 unsigned long lock_flags = 0;
5054 int rc = -ENXIO;
5055
5056 ENTER;
5057 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5058 while (ioa_cfg->in_reset_reload) {
5059 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5060 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5061 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5062 }
5063
5064 res = sata_port->res;
5065 if (res) {
5066 rc = ipr_device_reset(ioa_cfg, res);
5067 *classes = res->ata_class;
5068 }
5069
5070 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5071 LEAVE;
5072 return rc;
5073 }
5074
5075 /**
5076 * __ipr_eh_dev_reset - Reset the device
5077 * @scsi_cmd: scsi command struct
5078 *
5079 * This function issues a device reset to the affected device.
5080 * A LUN reset will be sent to the device first. If that does
5081 * not work, a target reset will be sent.
5082 *
5083 * Return value:
5084 * SUCCESS / FAILED
5085 **/
5086 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5087 {
5088 struct ipr_cmnd *ipr_cmd;
5089 struct ipr_ioa_cfg *ioa_cfg;
5090 struct ipr_resource_entry *res;
5091 struct ata_port *ap;
5092 int rc = 0;
5093 struct ipr_hrr_queue *hrrq;
5094
5095 ENTER;
5096 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5097 res = scsi_cmd->device->hostdata;
5098
5099 if (!res)
5100 return FAILED;
5101
5102 /*
5103 * If we are currently going through reset/reload, return failed. This will force the
5104 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5105 * reset to complete
5106 */
5107 if (ioa_cfg->in_reset_reload)
5108 return FAILED;
5109 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5110 return FAILED;
5111
5112 for_each_hrrq(hrrq, ioa_cfg) {
5113 spin_lock(&hrrq->_lock);
5114 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5115 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5116 if (ipr_cmd->scsi_cmd)
5117 ipr_cmd->done = ipr_scsi_eh_done;
5118 if (ipr_cmd->qc)
5119 ipr_cmd->done = ipr_sata_eh_done;
5120 if (ipr_cmd->qc &&
5121 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5122 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5123 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5124 }
5125 }
5126 }
5127 spin_unlock(&hrrq->_lock);
5128 }
5129 res->resetting_device = 1;
5130 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5131
5132 if (ipr_is_gata(res) && res->sata_port) {
5133 ap = res->sata_port->ap;
5134 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5135 ata_std_error_handler(ap);
5136 spin_lock_irq(scsi_cmd->device->host->host_lock);
5137
5138 for_each_hrrq(hrrq, ioa_cfg) {
5139 spin_lock(&hrrq->_lock);
5140 list_for_each_entry(ipr_cmd,
5141 &hrrq->hrrq_pending_q, queue) {
5142 if (ipr_cmd->ioarcb.res_handle ==
5143 res->res_handle) {
5144 rc = -EIO;
5145 break;
5146 }
5147 }
5148 spin_unlock(&hrrq->_lock);
5149 }
5150 } else
5151 rc = ipr_device_reset(ioa_cfg, res);
5152 res->resetting_device = 0;
5153 res->reset_occurred = 1;
5154
5155 LEAVE;
5156 return rc ? FAILED : SUCCESS;
5157 }
5158
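/**
 * ipr_eh_dev_reset - Device reset handler with locking
 * @cmd: scsi command struct
 *
 * Takes the host lock, performs the reset via __ipr_eh_dev_reset,
 * then waits for any remaining ops to the device to complete.
 *
 * Return value:
 * SUCCESS / FAILED
 **/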
5159 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5160 {
5161 int rc;
5162 struct ipr_ioa_cfg *ioa_cfg;
5163
5164 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5165
5166 spin_lock_irq(cmd->device->host->host_lock);
5167 rc = __ipr_eh_dev_reset(cmd);
5168 spin_unlock_irq(cmd->device->host->host_lock);
5169
5170 if (rc == SUCCESS)
5171 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5172
5173 return rc;
5174 }
5175
5176 /**
5177 * ipr_bus_reset_done - Op done function for bus reset.
5178 * @ipr_cmd: ipr command struct
5179 *
5180 * This function is the op done function for a bus reset
5181 *
5182 * Return value:
5183 * none
5184 **/
5185 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5186 {
5187 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5188 struct ipr_resource_entry *res;
5189
5190 ENTER;
5191 if (!ioa_cfg->sis64)
5192 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5193 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5194 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5195 break;
5196 }
5197 }
5198
5199 /*
5200 * If abort has not completed, indicate the reset has, else call the
5201 * abort's done function to wake the sleeping eh thread
5202 */
5203 if (ipr_cmd->sibling->sibling)
5204 ipr_cmd->sibling->sibling = NULL;
5205 else
5206 ipr_cmd->sibling->done(ipr_cmd->sibling);
5207
5208 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5209 LEAVE;
5210 }
5211
5212 /**
5213 * ipr_abort_timeout - An abort task has timed out
5214 * @ipr_cmd: ipr command struct
5215 *
5216 * This function handles when an abort task times out. If this
5217 * happens we issue a bus reset since we have resources tied
5218 * up that must be freed before returning to the midlayer.
5219 *
5220 * Return value:
5221 * none
5222 **/
5223 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5224 {
5225 struct ipr_cmnd *reset_cmd;
5226 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5227 struct ipr_cmd_pkt *cmd_pkt;
5228 unsigned long lock_flags = 0;
5229
5230 ENTER;
5231 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5232 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5234 return;
5235 }
5236
5237 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5238 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
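/* Cross-link the abort and the bus reset so ipr_bus_reset_done
 * can tell whether the original abort has already completed.
 */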
5239 ipr_cmd->sibling = reset_cmd;
5240 reset_cmd->sibling = ipr_cmd;
5241 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5242 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5243 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5244 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5245 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5246
5247 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5248 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5249 LEAVE;
5250 }
5251
5252 /**
5253 * ipr_cancel_op - Cancel specified op
5254 * @scsi_cmd: scsi command struct
5255 *
5256 * This function cancels specified op.
5257 *
5258 * Return value:
5259 * SUCCESS / FAILED
5260 **/
5261 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5262 {
5263 struct ipr_cmnd *ipr_cmd;
5264 struct ipr_ioa_cfg *ioa_cfg;
5265 struct ipr_resource_entry *res;
5266 struct ipr_cmd_pkt *cmd_pkt;
5267 u32 ioasc, int_reg;
5268 int op_found = 0;
5269 struct ipr_hrr_queue *hrrq;
5270
5271 ENTER;
5272 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5273 res = scsi_cmd->device->hostdata;
5274
5275 /* If we are currently going through reset/reload, return failed.
5276 * This will force the mid-layer to call ipr_eh_host_reset,
5277 * which will then go to sleep and wait for the reset to complete
5278 */
5279 if (ioa_cfg->in_reset_reload ||
5280 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5281 return FAILED;
5282 if (!res)
5283 return FAILED;
5284
5285 /*
5286 * If we are aborting a timed out op, chances are that the timeout was caused
5287 * by an EEH error that has not yet been detected. In such cases, reading a
5288 * register will trigger the EEH recovery infrastructure.
5289 */
5290 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5291
5292 if (!ipr_is_gscsi(res))
5293 return FAILED;
5294
5295 for_each_hrrq(hrrq, ioa_cfg) {
5296 spin_lock(&hrrq->_lock);
5297 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5298 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5299 ipr_cmd->done = ipr_scsi_eh_done;
5300 op_found = 1;
5301 break;
5302 }
5303 }
5304 spin_unlock(&hrrq->_lock);
5305 }
5306
5307 if (!op_found)
5308 return SUCCESS;
5309
5310 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5311 ipr_cmd->ioarcb.res_handle = res->res_handle;
5312 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5313 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5314 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5315 ipr_cmd->u.sdev = scsi_cmd->device;
5316
5317 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5318 scsi_cmd->cmnd[0]);
5319 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5320 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5321
5322 /*
5323 * If the abort task timed out and we sent a bus reset, we will get
5324 * one of the following responses to the abort
5325 */
5326 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5327 ioasc = 0;
5328 ipr_trace;
5329 }
5330
5331 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5332 if (!ipr_is_naca_model(res))
5333 res->needs_sync_complete = 1;
5334
5335 LEAVE;
5336 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5337 }
5338
5339 /**
5340 * ipr_eh_abort - Abort a single op
5341 * @scsi_cmd: scsi command struct
5342 *
5343 * Return value:
5344 * SUCCESS / FAILED
5345 **/
5346 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5347 {
5348 unsigned long flags;
5349 int rc;
5350 struct ipr_ioa_cfg *ioa_cfg;
5351
5352 ENTER;
5353
5354 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5355
5356 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5357 rc = ipr_cancel_op(scsi_cmd);
5358 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5359
5360 if (rc == SUCCESS)
5361 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5362 LEAVE;
5363 return rc;
5364 }
5365
5366 /**
5367 * ipr_handle_other_interrupt - Handle "other" interrupts
5368 * @ioa_cfg: ioa config struct
5369 * @int_reg: interrupt register
5370 *
5371 * Return value:
5372 * IRQ_NONE / IRQ_HANDLED
5373 **/
5374 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5375 u32 int_reg)
5376 {
5377 irqreturn_t rc = IRQ_HANDLED;
5378 u32 int_mask_reg;
5379
5380 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5381 int_reg &= ~int_mask_reg;
5382
5383 /* If an interrupt on the adapter did not occur, ignore it.
5384 * Or in the case of SIS 64, check for a stage change interrupt.
5385 */
5386 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5387 if (ioa_cfg->sis64) {
5388 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5389 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5390 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5391
5392 /* clear stage change */
5393 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5394 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5395 list_del(&ioa_cfg->reset_cmd->queue);
5396 del_timer(&ioa_cfg->reset_cmd->timer);
5397 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5398 return IRQ_HANDLED;
5399 }
5400 }
5401
5402 return IRQ_NONE;
5403 }
5404
5405 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5406 /* Mask the interrupt */
5407 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5408
5409 /* Clear the interrupt */
5410 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
5411 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5412
5413 list_del(&ioa_cfg->reset_cmd->queue);
5414 del_timer(&ioa_cfg->reset_cmd->timer);
5415 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5416 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5417 if (ioa_cfg->clear_isr) {
5418 if (ipr_debug && printk_ratelimit())
5419 dev_err(&ioa_cfg->pdev->dev,
5420 "Spurious interrupt detected. 0x%08X\n", int_reg);
5421 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5422 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5423 return IRQ_NONE;
5424 }
5425 } else {
5426 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5427 ioa_cfg->ioa_unit_checked = 1;
5428 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5429 dev_err(&ioa_cfg->pdev->dev,
5430 "No Host RRQ. 0x%08X\n", int_reg);
5431 else
5432 dev_err(&ioa_cfg->pdev->dev,
5433 "Permanent IOA failure. 0x%08X\n", int_reg);
5434
5435 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5436 ioa_cfg->sdt_state = GET_DUMP;
5437
5438 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5439 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5440 }
5441
5442 return rc;
5443 }
5444
5445 /**
5446 * ipr_isr_eh - Interrupt service routine error handler
5447 * @ioa_cfg: ioa config struct
5448 * @msg: message to log
 * @number: number to log along with the message
5449 *
5450 * Return value:
5451 * none
5452 **/
5453 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5454 {
5455 ioa_cfg->errors_logged++;
5456 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5457
5458 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5459 ioa_cfg->sdt_state = GET_DUMP;
5460
5461 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5462 }
5463
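/**
 * ipr_process_hrrq - Process completed commands on a host RRQ
 * @hrr_queue: host request response queue
 * @budget: maximum number of responses to process, or -1 for no limit
 * @doneq: list to which completed commands are moved
 *
 * Walks the HRRQ while entries carry the current toggle bit, moving
 * the corresponding commands to @doneq so their done functions can
 * be run outside the queue lock.
 *
 * Return value:
 * number of responses processed
 **/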
5464 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5465 struct list_head *doneq)
5466 {
5467 u32 ioasc;
5468 u16 cmd_index;
5469 struct ipr_cmnd *ipr_cmd;
5470 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5471 int num_hrrq = 0;
5472
5473 /* If interrupts are disabled, ignore the interrupt */
5474 if (!hrr_queue->allow_interrupts)
5475 return 0;
5476
5477 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5478 hrr_queue->toggle_bit) {
5479
5480 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5481 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5482 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5483
5484 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5485 cmd_index < hrr_queue->min_cmd_id)) {
5486 ipr_isr_eh(ioa_cfg,
5487 "Invalid response handle from IOA: ",
5488 cmd_index);
5489 break;
5490 }
5491
5492 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5493 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5494
5495 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5496
5497 list_move_tail(&ipr_cmd->queue, doneq);
5498
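/* Advance to the next HRRQ entry; when the queue wraps, flip the
 * expected toggle bit so entries written by the previous pass are
 * not processed again.
 */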
5499 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5500 hrr_queue->hrrq_curr++;
5501 } else {
5502 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5503 hrr_queue->toggle_bit ^= 1u;
5504 }
5505 num_hrrq++;
5506 if (budget > 0 && num_hrrq >= budget)
5507 break;
5508 }
5509
5510 return num_hrrq;
5511 }
5512
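/**
 * ipr_iopoll - blk-iopoll callback for HRRQ processing
 * @iop: blk_iopoll structure embedded in the HRRQ
 * @budget: maximum number of completions to process per poll
 *
 * Processes up to @budget responses with the HRRQ lock held, completes
 * the poll when fewer than @budget were found, then runs the done
 * functions outside the lock.
 *
 * Return value:
 * number of operations completed
 **/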
5513 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5514 {
5515 struct ipr_ioa_cfg *ioa_cfg;
5516 struct ipr_hrr_queue *hrrq;
5517 struct ipr_cmnd *ipr_cmd, *temp;
5518 unsigned long hrrq_flags;
5519 int completed_ops;
5520 LIST_HEAD(doneq);
5521
5522 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5523 ioa_cfg = hrrq->ioa_cfg;
5524
5525 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5526 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5527
5528 if (completed_ops < budget)
5529 blk_iopoll_complete(iop);
5530 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5531
5532 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5533 list_del(&ipr_cmd->queue);
5534 del_timer(&ipr_cmd->timer);
5535 ipr_cmd->fast_done(ipr_cmd);
5536 }
5537
5538 return completed_ops;
5539 }
5540
5541 /**
5542 * ipr_isr - Interrupt service routine
5543 * @irq: irq number
5544 * @devp: pointer to the host request response queue (HRRQ)
5545 *
5546 * Return value:
5547 * IRQ_NONE / IRQ_HANDLED
5548 **/
5549 static irqreturn_t ipr_isr(int irq, void *devp)
5550 {
5551 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5552 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5553 unsigned long hrrq_flags = 0;
5554 u32 int_reg = 0;
5555 int num_hrrq = 0;
5556 int irq_none = 0;
5557 struct ipr_cmnd *ipr_cmd, *temp;
5558 irqreturn_t rc = IRQ_NONE;
5559 LIST_HEAD(doneq);
5560
5561 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5562 /* If interrupts are disabled, ignore the interrupt */
5563 if (!hrrq->allow_interrupts) {
5564 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5565 return IRQ_NONE;
5566 }
5567
5568 while (1) {
5569 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5570 rc = IRQ_HANDLED;
5571
5572 if (!ioa_cfg->clear_isr)
5573 break;
5574
5575 /* Clear the PCI interrupt */
5576 num_hrrq = 0;
5577 do {
5578 writel(IPR_PCII_HRRQ_UPDATED,
5579 ioa_cfg->regs.clr_interrupt_reg32);
5580 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5581 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5582 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5583
5584 } else if (rc == IRQ_NONE && irq_none == 0) {
5585 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5586 irq_none++;
5587 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5588 int_reg & IPR_PCII_HRRQ_UPDATED) {
5589 ipr_isr_eh(ioa_cfg,
5590 "Error clearing HRRQ: ", num_hrrq);
5591 rc = IRQ_HANDLED;
5592 break;
5593 } else
5594 break;
5595 }
5596
5597 if (unlikely(rc == IRQ_NONE))
5598 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5599
5600 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5601 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5602 list_del(&ipr_cmd->queue);
5603 del_timer(&ipr_cmd->timer);
5604 ipr_cmd->fast_done(ipr_cmd);
5605 }
5606 return rc;
5607 }
5608
5609 /**
5610 * ipr_isr_mhrrq - Interrupt service routine
5611 * @irq: irq number
5612 * @devp: pointer to the host request response queue (HRRQ)
5613 *
5614 * Return value:
5615 * IRQ_NONE / IRQ_HANDLED
5616 **/
5617 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5618 {
5619 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5620 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5621 unsigned long hrrq_flags = 0;
5622 struct ipr_cmnd *ipr_cmd, *temp;
5623 irqreturn_t rc = IRQ_NONE;
5624 LIST_HEAD(doneq);
5625
5626 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5627
5628 /* If interrupts are disabled, ignore the interrupt */
5629 if (!hrrq->allow_interrupts) {
5630 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5631 return IRQ_NONE;
5632 }
5633
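/* When iopoll is enabled on SIS64 with multiple vectors, defer the
 * work to blk-iopoll; otherwise process the queue directly here.
 */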
5634 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5635 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5636 hrrq->toggle_bit) {
5637 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5638 blk_iopoll_sched(&hrrq->iopoll);
5639 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5640 return IRQ_HANDLED;
5641 }
5642 } else {
5643 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5644 hrrq->toggle_bit)
5645
5646 if (ipr_process_hrrq(hrrq, -1, &doneq))
5647 rc = IRQ_HANDLED;
5648 }
5649
5650 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5651
5652 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5653 list_del(&ipr_cmd->queue);
5654 del_timer(&ipr_cmd->timer);
5655 ipr_cmd->fast_done(ipr_cmd);
5656 }
5657 return rc;
5658 }
5659
5660 /**
5661 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5662 * @ioa_cfg: ioa config struct
5663 * @ipr_cmd: ipr command struct
5664 *
5665 * Return value:
5666 * 0 on success / -1 on failure
5667 **/
5668 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5669 struct ipr_cmnd *ipr_cmd)
5670 {
5671 int i, nseg;
5672 struct scatterlist *sg;
5673 u32 length;
5674 u32 ioadl_flags = 0;
5675 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5676 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5677 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5678
5679 length = scsi_bufflen(scsi_cmd);
5680 if (!length)
5681 return 0;
5682
5683 nseg = scsi_dma_map(scsi_cmd);
5684 if (nseg < 0) {
5685 if (printk_ratelimit())
5686 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5687 return -1;
5688 }
5689
5690 ipr_cmd->dma_use_sg = nseg;
5691
5692 ioarcb->data_transfer_length = cpu_to_be32(length);
5693 ioarcb->ioadl_len =
5694 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5695
5696 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5697 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5698 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5699 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5700 ioadl_flags = IPR_IOADL_FLAGS_READ;
5701
5702 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5703 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5704 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5705 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5706 }
5707
5708 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5709 return 0;
5710 }
5711
5712 /**
5713 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5714 * @ioa_cfg: ioa config struct
5715 * @ipr_cmd: ipr command struct
5716 *
5717 * Return value:
5718 * 0 on success / -1 on failure
5719 **/
5720 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5721 struct ipr_cmnd *ipr_cmd)
5722 {
5723 int i, nseg;
5724 struct scatterlist *sg;
5725 u32 length;
5726 u32 ioadl_flags = 0;
5727 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5728 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5729 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5730
5731 length = scsi_bufflen(scsi_cmd);
5732 if (!length)
5733 return 0;
5734
5735 nseg = scsi_dma_map(scsi_cmd);
5736 if (nseg < 0) {
5737 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5738 return -1;
5739 }
5740
5741 ipr_cmd->dma_use_sg = nseg;
5742
5743 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5744 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5745 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5746 ioarcb->data_transfer_length = cpu_to_be32(length);
5747 ioarcb->ioadl_len =
5748 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5749 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5750 ioadl_flags = IPR_IOADL_FLAGS_READ;
5751 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5752 ioarcb->read_ioadl_len =
5753 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5754 }
5755
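/* If the scatter/gather list fits in the IOARCB's add_data area,
 * embed it there instead of using the external IOADL.
 */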
5756 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5757 ioadl = ioarcb->u.add_data.u.ioadl;
5758 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5759 offsetof(struct ipr_ioarcb, u.add_data));
5760 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5761 }
5762
5763 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5764 ioadl[i].flags_and_data_len =
5765 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5766 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5767 }
5768
5769 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5770 return 0;
5771 }
5772
5773 /**
5774 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5775 * @scsi_cmd: scsi command struct
5776 *
5777 * Return value:
5778 * task attributes
5779 **/
5780 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5781 {
5782 u8 tag[2];
5783 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5784
5785 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5786 switch (tag[0]) {
5787 case MSG_SIMPLE_TAG:
5788 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5789 break;
5790 case MSG_HEAD_TAG:
5791 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5792 break;
5793 case MSG_ORDERED_TAG:
5794 rc = IPR_FLAGS_LO_ORDERED_TASK;
5795 break;
5796 }
5797 }
5798
5799 return rc;
5800 }
5801
5802 /**
5803 * ipr_erp_done - Process completion of ERP for a device
5804 * @ipr_cmd: ipr command struct
5805 *
5806 * This function copies the sense buffer into the scsi_cmd
5807 * struct and pushes the scsi_done function.
5808 *
5809 * Return value:
5810 * nothing
5811 **/
5812 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5813 {
5814 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5815 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5816 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5817
5818 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5819 scsi_cmd->result |= (DID_ERROR << 16);
5820 scmd_printk(KERN_ERR, scsi_cmd,
5821 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5822 } else {
5823 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5824 SCSI_SENSE_BUFFERSIZE);
5825 }
5826
5827 if (res) {
5828 if (!ipr_is_naca_model(res))
5829 res->needs_sync_complete = 1;
5830 res->in_erp = 0;
5831 }
5832 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5833 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5834 scsi_cmd->scsi_done(scsi_cmd);
5835 }
5836
5837 /**
5838 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5839 * @ipr_cmd: ipr command struct
5840 *
5841 * Return value:
5842 * none
5843 **/
5844 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5845 {
5846 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5847 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5848 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5849
5850 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5851 ioarcb->data_transfer_length = 0;
5852 ioarcb->read_data_transfer_length = 0;
5853 ioarcb->ioadl_len = 0;
5854 ioarcb->read_ioadl_len = 0;
5855 ioasa->hdr.ioasc = 0;
5856 ioasa->hdr.residual_data_len = 0;
5857
5858 if (ipr_cmd->ioa_cfg->sis64)
5859 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5860 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5861 else {
5862 ioarcb->write_ioadl_addr =
5863 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5864 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5865 }
5866 }
5867
5868 /**
5869 * ipr_erp_request_sense - Send request sense to a device
5870 * @ipr_cmd: ipr command struct
5871 *
5872 * This function sends a request sense to a device as a result
5873 * of a check condition.
5874 *
5875 * Return value:
5876 * nothing
5877 **/
5878 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5879 {
5880 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5881 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5882
5883 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5884 ipr_erp_done(ipr_cmd);
5885 return;
5886 }
5887
5888 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5889
5890 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5891 cmd_pkt->cdb[0] = REQUEST_SENSE;
5892 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5893 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5894 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5895 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5896
5897 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5898 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5899
5900 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5901 IPR_REQUEST_SENSE_TIMEOUT * 2);
5902 }
5903
5904 /**
5905 * ipr_erp_cancel_all - Send cancel all to a device
5906 * @ipr_cmd: ipr command struct
5907 *
5908 * This function sends a cancel all to a device to clear the
5909 * queue. If we are running TCQ on the device, QERR is set to 1,
5910 * which means all outstanding ops have been dropped on the floor.
5911 * Cancel all will return them to us.
5912 *
5913 * Return value:
5914 * nothing
5915 **/
5916 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5917 {
5918 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5919 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5920 struct ipr_cmd_pkt *cmd_pkt;
5921
5922 res->in_erp = 1;
5923
5924 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5925
5926 if (!scsi_get_tag_type(scsi_cmd->device)) {
5927 ipr_erp_request_sense(ipr_cmd);
5928 return;
5929 }
5930
5931 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5932 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5933 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5934
5935 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5936 IPR_CANCEL_ALL_TIMEOUT);
5937 }
5938
5939 /**
5940 * ipr_dump_ioasa - Dump contents of IOASA
5941 * @ioa_cfg: ioa config struct
5942 * @ipr_cmd: ipr command struct
5943 * @res: resource entry struct
5944 *
5945 * This function is invoked by the interrupt handler when ops
5946 * fail. It will log the IOASA if appropriate. Only called
5947 * for GPDD ops.
5948 *
5949 * Return value:
5950 * none
5951 **/
5952 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5953 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5954 {
5955 int i;
5956 u16 data_len;
5957 u32 ioasc, fd_ioasc;
5958 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5959 __be32 *ioasa_data = (__be32 *)ioasa;
5960 int error_index;
5961
5962 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5963 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5964
5965 if (0 == ioasc)
5966 return;
5967
5968 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5969 return;
5970
5971 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5972 error_index = ipr_get_error(fd_ioasc);
5973 else
5974 error_index = ipr_get_error(ioasc);
5975
5976 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5977 /* Don't log an error if the IOA already logged one */
5978 if (ioasa->hdr.ilid != 0)
5979 return;
5980
5981 if (!ipr_is_gscsi(res))
5982 return;
5983
5984 if (ipr_error_table[error_index].log_ioasa == 0)
5985 return;
5986 }
5987
5988 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5989
5990 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5991 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5992 data_len = sizeof(struct ipr_ioasa64);
5993 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5994 data_len = sizeof(struct ipr_ioasa);
5995
5996 ipr_err("IOASA Dump:\n");
5997
5998 for (i = 0; i < data_len / 4; i += 4) {
5999 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6000 be32_to_cpu(ioasa_data[i]),
6001 be32_to_cpu(ioasa_data[i+1]),
6002 be32_to_cpu(ioasa_data[i+2]),
6003 be32_to_cpu(ioasa_data[i+3]));
6004 }
6005 }
6006
6007 /**
6008 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6009 * @ipr_cmd: ipr command struct
6011 *
6012 * Return value:
6013 * none
6014 **/
6015 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6016 {
6017 u32 failing_lba;
6018 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6019 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6020 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6021 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6022
6023 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6024
6025 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6026 return;
6027
6028 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6029
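/* A vset failing LBA that does not fit in 32 bits is reported using
 * descriptor format (0x72) sense data; everything else uses fixed
 * format (0x70).
 */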
6030 if (ipr_is_vset_device(res) &&
6031 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6032 ioasa->u.vset.failing_lba_hi != 0) {
6033 sense_buf[0] = 0x72;
6034 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6035 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6036 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6037
6038 sense_buf[7] = 12;
6039 sense_buf[8] = 0;
6040 sense_buf[9] = 0x0A;
6041 sense_buf[10] = 0x80;
6042
6043 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6044
6045 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6046 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6047 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6048 sense_buf[15] = failing_lba & 0x000000ff;
6049
6050 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6051
6052 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6053 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6054 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6055 sense_buf[19] = failing_lba & 0x000000ff;
6056 } else {
6057 sense_buf[0] = 0x70;
6058 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6059 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6060 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6061
6062 /* Illegal request */
6063 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6064 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6065 sense_buf[7] = 10; /* additional length */
6066
6067 /* IOARCB was in error */
6068 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6069 sense_buf[15] = 0xC0;
6070 else /* Parameter data was invalid */
6071 sense_buf[15] = 0x80;
6072
6073 sense_buf[16] =
6074 ((IPR_FIELD_POINTER_MASK &
6075 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6076 sense_buf[17] =
6077 (IPR_FIELD_POINTER_MASK &
6078 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6079 } else {
6080 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6081 if (ipr_is_vset_device(res))
6082 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6083 else
6084 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6085
6086 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6087 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6088 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6089 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6090 sense_buf[6] = failing_lba & 0x000000ff;
6091 }
6092
6093 sense_buf[7] = 6; /* additional length */
6094 }
6095 }
6096 }
6097
6098 /**
6099 * ipr_get_autosense - Copy autosense data to sense buffer
6100 * @ipr_cmd: ipr command struct
6101 *
6102 * This function copies the autosense buffer to the buffer
6103 * in the scsi_cmd, if there is autosense available.
6104 *
6105 * Return value:
6106 * 1 if autosense was available / 0 if not
6107 **/
6108 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6109 {
6110 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6111 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6112
6113 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6114 return 0;
6115
6116 if (ipr_cmd->ioa_cfg->sis64)
6117 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6118 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6119 SCSI_SENSE_BUFFERSIZE));
6120 else
6121 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6122 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6123 SCSI_SENSE_BUFFERSIZE));
6124 return 1;
6125 }
6126
6127 /**
6128 * ipr_erp_start - Process an error response for a SCSI op
6129 * @ioa_cfg: ioa config struct
6130 * @ipr_cmd: ipr command struct
6131 *
6132 * This function determines whether or not to initiate ERP
6133 * on the affected device.
6134 *
6135 * Return value:
6136 * nothing
6137 **/
6138 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6139 struct ipr_cmnd *ipr_cmd)
6140 {
6141 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6142 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6143 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6144 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6145
6146 if (!res) {
6147 ipr_scsi_eh_done(ipr_cmd);
6148 return;
6149 }
6150
6151 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6152 ipr_gen_sense(ipr_cmd);
6153
6154 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6155
6156 switch (masked_ioasc) {
6157 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6158 if (ipr_is_naca_model(res))
6159 scsi_cmd->result |= (DID_ABORT << 16);
6160 else
6161 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6162 break;
6163 case IPR_IOASC_IR_RESOURCE_HANDLE:
6164 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6165 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6166 break;
6167 case IPR_IOASC_HW_SEL_TIMEOUT:
6168 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6169 if (!ipr_is_naca_model(res))
6170 res->needs_sync_complete = 1;
6171 break;
6172 case IPR_IOASC_SYNC_REQUIRED:
6173 if (!res->in_erp)
6174 res->needs_sync_complete = 1;
6175 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6176 break;
6177 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6178 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6179 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6180 break;
6181 case IPR_IOASC_BUS_WAS_RESET:
6182 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6183 /*
6184 * Report the bus reset and ask for a retry. The device
6185 * will return check condition/unit attention on the next command.
6186 */
6187 if (!res->resetting_device)
6188 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6189 scsi_cmd->result |= (DID_ERROR << 16);
6190 if (!ipr_is_naca_model(res))
6191 res->needs_sync_complete = 1;
6192 break;
6193 case IPR_IOASC_HW_DEV_BUS_STATUS:
6194 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6195 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6196 if (!ipr_get_autosense(ipr_cmd)) {
6197 if (!ipr_is_naca_model(res)) {
6198 ipr_erp_cancel_all(ipr_cmd);
6199 return;
6200 }
6201 }
6202 }
6203 if (!ipr_is_naca_model(res))
6204 res->needs_sync_complete = 1;
6205 break;
6206 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6207 break;
6208 default:
6209 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6210 scsi_cmd->result |= (DID_ERROR << 16);
6211 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6212 res->needs_sync_complete = 1;
6213 break;
6214 }
6215
6216 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6217 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6218 scsi_cmd->scsi_done(scsi_cmd);
6219 }
6220
6221 /**
6222 * ipr_scsi_done - mid-layer done function
6223 * @ipr_cmd: ipr command struct
6224 *
6225 * This function is invoked by the interrupt handler for
6226 * ops generated by the SCSI mid-layer
6227 *
6228 * Return value:
6229 * none
6230 **/
6231 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6232 {
6233 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6234 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6235 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6236 unsigned long lock_flags;
6237
6238 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6239
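/* Fast path: a successful completion is finished under the HRRQ lock;
 * anything with a sense key set goes through ERP under the host lock.
 */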
6240 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6241 scsi_dma_unmap(scsi_cmd);
6242
6243 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6244 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6245 scsi_cmd->scsi_done(scsi_cmd);
6246 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6247 } else {
6248 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6249 spin_lock(&ipr_cmd->hrrq->_lock);
6250 ipr_erp_start(ioa_cfg, ipr_cmd);
6251 spin_unlock(&ipr_cmd->hrrq->_lock);
6252 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6253 }
6254 }
6255
6256 /**
6257 * ipr_queuecommand - Queue a mid-layer request
6258 * @shost: scsi host struct
6259 * @scsi_cmd: scsi command struct
6260 *
6261 * This function queues a request generated by the mid-layer.
6262 *
6263 * Return value:
6264 * 0 on success
6265 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6266 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6267 **/
6268 static int ipr_queuecommand(struct Scsi_Host *shost,
6269 struct scsi_cmnd *scsi_cmd)
6270 {
6271 struct ipr_ioa_cfg *ioa_cfg;
6272 struct ipr_resource_entry *res;
6273 struct ipr_ioarcb *ioarcb;
6274 struct ipr_cmnd *ipr_cmd;
6275 unsigned long hrrq_flags, lock_flags;
6276 int rc;
6277 struct ipr_hrr_queue *hrrq;
6278 int hrrq_id;
6279
6280 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6281
6282 scsi_cmd->result = (DID_OK << 16);
6283 res = scsi_cmd->device->hostdata;
6284
6285 if (ipr_is_gata(res) && res->sata_port) {
6286 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6287 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6289 return rc;
6290 }
6291
6292 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6293 hrrq = &ioa_cfg->hrrq[hrrq_id];
6294
6295 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6296 /*
6297 * We are currently blocking all devices due to a host reset.
6298 * We have told the host to stop giving us new requests, but
6299 * ERP ops don't count. FIXME
6300 */
6301 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6302 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6303 return SCSI_MLQUEUE_HOST_BUSY;
6304 }
6305
6306 /*
6307 * FIXME - Create a scsi_set_host_offline interface
6308 * so the ioa_is_dead check can be removed
6309 */
6310 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6311 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6312 goto err_nodev;
6313 }
6314
6315 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6316 if (ipr_cmd == NULL) {
6317 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6318 return SCSI_MLQUEUE_HOST_BUSY;
6319 }
6320 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6321
6322 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6323 ioarcb = &ipr_cmd->ioarcb;
6324
6325 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6326 ipr_cmd->scsi_cmd = scsi_cmd;
6327 ipr_cmd->done = ipr_scsi_eh_done;
6328
6329 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6330 if (scsi_cmd->underflow == 0)
6331 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6332
6333 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6334 if (ipr_is_gscsi(res) && res->reset_occurred) {
6335 res->reset_occurred = 0;
6336 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6337 }
6338 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6339 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
6340 }
6341
6342 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6343 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6344 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6345 }
6346
6347 if (ioa_cfg->sis64)
6348 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6349 else
6350 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6351
6352 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6353 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6354 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6355 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6356 if (!rc)
6357 scsi_dma_unmap(scsi_cmd);
6358 return SCSI_MLQUEUE_HOST_BUSY;
6359 }
6360
6361 if (unlikely(hrrq->ioa_is_dead)) {
6362 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6363 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6364 scsi_dma_unmap(scsi_cmd);
6365 goto err_nodev;
6366 }
6367
6368 ioarcb->res_handle = res->res_handle;
6369 if (res->needs_sync_complete) {
6370 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6371 res->needs_sync_complete = 0;
6372 }
6373 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6374 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6375 ipr_send_command(ipr_cmd);
6376 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6377 return 0;
6378
6379 err_nodev:
6380 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6381 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6382 scsi_cmd->result = (DID_NO_CONNECT << 16);
6383 scsi_cmd->scsi_done(scsi_cmd);
6384 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6385 return 0;
6386 }
6387
6388 /**
6389 * ipr_ioctl - IOCTL handler
6390 * @sdev: scsi device struct
6391 * @cmd: IOCTL cmd
6392 * @arg: IOCTL arg
6393 *
6394 * Return value:
6395 * 0 on success / other on failure
6396 **/
6397 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6398 {
6399 struct ipr_resource_entry *res;
6400
6401 res = (struct ipr_resource_entry *)sdev->hostdata;
6402 if (res && ipr_is_gata(res)) {
6403 if (cmd == HDIO_GET_IDENTITY)
6404 return -ENOTTY;
6405 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6406 }
6407
6408 return -EINVAL;
6409 }
6410
6411 /**
6412 * ipr_ioa_info - Get information about the card/driver
6413 * @host: scsi host struct
6414 *
6415 * Return value:
6416 * pointer to buffer with description string
6417 **/
6418 static const char *ipr_ioa_info(struct Scsi_Host *host)
6419 {
6420 static char buffer[512];
6421 struct ipr_ioa_cfg *ioa_cfg;
6422 unsigned long lock_flags = 0;
6423
6424 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6425
6426 spin_lock_irqsave(host->host_lock, lock_flags);
6427 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6428 spin_unlock_irqrestore(host->host_lock, lock_flags);
6429
6430 return buffer;
6431 }
6432
6433 static struct scsi_host_template driver_template = {
6434 .module = THIS_MODULE,
6435 .name = "IPR",
6436 .info = ipr_ioa_info,
6437 .ioctl = ipr_ioctl,
6438 .queuecommand = ipr_queuecommand,
6439 .eh_abort_handler = ipr_eh_abort,
6440 .eh_device_reset_handler = ipr_eh_dev_reset,
6441 .eh_host_reset_handler = ipr_eh_host_reset,
6442 .slave_alloc = ipr_slave_alloc,
6443 .slave_configure = ipr_slave_configure,
6444 .slave_destroy = ipr_slave_destroy,
6445 .target_alloc = ipr_target_alloc,
6446 .target_destroy = ipr_target_destroy,
6447 .change_queue_depth = ipr_change_queue_depth,
6448 .change_queue_type = ipr_change_queue_type,
6449 .bios_param = ipr_biosparam,
6450 .can_queue = IPR_MAX_COMMANDS,
6451 .this_id = -1,
6452 .sg_tablesize = IPR_MAX_SGLIST,
6453 .max_sectors = IPR_IOA_MAX_SECTORS,
6454 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6455 .use_clustering = ENABLE_CLUSTERING,
6456 .shost_attrs = ipr_ioa_attrs,
6457 .sdev_attrs = ipr_dev_attrs,
6458 .proc_name = IPR_NAME,
6459 .no_write_same = 1,
6460 };
6461
6462 /**
6463 * ipr_ata_phy_reset - libata phy_reset handler
6464 * @ap: ata port to reset
6465 *
6466 **/
6467 static void ipr_ata_phy_reset(struct ata_port *ap)
6468 {
6469 unsigned long flags;
6470 struct ipr_sata_port *sata_port = ap->private_data;
6471 struct ipr_resource_entry *res = sata_port->res;
6472 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6473 int rc;
6474
6475 ENTER;
6476 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6477 while (ioa_cfg->in_reset_reload) {
6478 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6479 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6480 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6481 }
6482
6483 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6484 goto out_unlock;
6485
6486 rc = ipr_device_reset(ioa_cfg, res);
6487
6488 if (rc) {
6489 ap->link.device[0].class = ATA_DEV_NONE;
6490 goto out_unlock;
6491 }
6492
6493 ap->link.device[0].class = res->ata_class;
6494 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6495 ap->link.device[0].class = ATA_DEV_NONE;
6496
6497 out_unlock:
6498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6499 LEAVE;
6500 }
6501
6502 /**
6503 * ipr_ata_post_internal - Cleanup after an internal command
6504 * @qc: ATA queued command
6505 *
6506 * Return value:
6507 * none
6508 **/
6509 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6510 {
6511 struct ipr_sata_port *sata_port = qc->ap->private_data;
6512 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6513 struct ipr_cmnd *ipr_cmd;
6514 struct ipr_hrr_queue *hrrq;
6515 unsigned long flags;
6516
6517 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6518 while (ioa_cfg->in_reset_reload) {
6519 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6520 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6521 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6522 }
6523
6524 for_each_hrrq(hrrq, ioa_cfg) {
6525 spin_lock(&hrrq->_lock);
6526 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6527 if (ipr_cmd->qc == qc) {
6528 ipr_device_reset(ioa_cfg, sata_port->res);
6529 break;
6530 }
6531 }
6532 spin_unlock(&hrrq->_lock);
6533 }
6534 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6535 }
6536
6537 /**
6538 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6539 * @regs: destination
6540 * @tf: source ATA taskfile
6541 *
6542 * Return value:
6543 * none
6544 **/
6545 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6546 struct ata_taskfile *tf)
6547 {
6548 regs->feature = tf->feature;
6549 regs->nsect = tf->nsect;
6550 regs->lbal = tf->lbal;
6551 regs->lbam = tf->lbam;
6552 regs->lbah = tf->lbah;
6553 regs->device = tf->device;
6554 regs->command = tf->command;
6555 regs->hob_feature = tf->hob_feature;
6556 regs->hob_nsect = tf->hob_nsect;
6557 regs->hob_lbal = tf->hob_lbal;
6558 regs->hob_lbam = tf->hob_lbam;
6559 regs->hob_lbah = tf->hob_lbah;
6560 regs->ctl = tf->ctl;
6561 }
6562
6563 /**
6564 * ipr_sata_done - done function for SATA commands
6565 * @ipr_cmd: ipr command struct
6566 *
6567 * This function is invoked by the interrupt handler for
6568 * ops generated by the SCSI mid-layer to SATA devices
6569 *
6570 * Return value:
6571 * none
6572 **/
6573 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6574 {
6575 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6576 struct ata_queued_cmd *qc = ipr_cmd->qc;
6577 struct ipr_sata_port *sata_port = qc->ap->private_data;
6578 struct ipr_resource_entry *res = sata_port->res;
6579 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6580
6581 spin_lock(&ipr_cmd->hrrq->_lock);
6582 if (ipr_cmd->ioa_cfg->sis64)
6583 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6584 sizeof(struct ipr_ioasa_gata));
6585 else
6586 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6587 sizeof(struct ipr_ioasa_gata));
6588 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6589
6590 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6591 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6592
6593 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6594 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6595 else
6596 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6597 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6598 spin_unlock(&ipr_cmd->hrrq->_lock);
6599 ata_qc_complete(qc);
6600 }
6601
6602 /**
6603 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6604 * @ipr_cmd: ipr command struct
6605 * @qc: ATA queued command
6606 *
6607 **/
6608 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6609 struct ata_queued_cmd *qc)
6610 {
6611 u32 ioadl_flags = 0;
6612 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6613 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6614 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6615 int len = qc->nbytes;
6616 struct scatterlist *sg;
6617 unsigned int si;
6618 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6619
6620 if (len == 0)
6621 return;
6622
6623 if (qc->dma_dir == DMA_TO_DEVICE) {
6624 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6625 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6626 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6627 ioadl_flags = IPR_IOADL_FLAGS_READ;
6628
6629 ioarcb->data_transfer_length = cpu_to_be32(len);
6630 ioarcb->ioadl_len =
6631 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6632 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6633 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6634
6635 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6636 ioadl64->flags = cpu_to_be32(ioadl_flags);
6637 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6638 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6639
6640 last_ioadl64 = ioadl64;
6641 ioadl64++;
6642 }
6643
6644 if (likely(last_ioadl64))
6645 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6646 }
6647
6648 /**
6649 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6650 * @ipr_cmd: ipr command struct
6651 * @qc: ATA queued command
6652 *
6653 **/
6654 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6655 struct ata_queued_cmd *qc)
6656 {
6657 u32 ioadl_flags = 0;
6658 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6659 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6660 struct ipr_ioadl_desc *last_ioadl = NULL;
6661 int len = qc->nbytes;
6662 struct scatterlist *sg;
6663 unsigned int si;
6664
6665 if (len == 0)
6666 return;
6667
6668 if (qc->dma_dir == DMA_TO_DEVICE) {
6669 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6670 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6671 ioarcb->data_transfer_length = cpu_to_be32(len);
6672 ioarcb->ioadl_len =
6673 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6674 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6675 ioadl_flags = IPR_IOADL_FLAGS_READ;
6676 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6677 ioarcb->read_ioadl_len =
6678 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6679 }
6680
6681 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6682 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6683 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6684
6685 last_ioadl = ioadl;
6686 ioadl++;
6687 }
6688
6689 if (likely(last_ioadl))
6690 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6691 }
6692
6693 /**
6694 * ipr_qc_defer - Get a free ipr_cmd
6695 * @qc: queued command
6696 *
6697 * Return value:
6698 * 0 if the command may be queued / ATA_DEFER_LINK if it must be deferred
6699 **/
6700 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6701 {
6702 struct ata_port *ap = qc->ap;
6703 struct ipr_sata_port *sata_port = ap->private_data;
6704 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6705 struct ipr_cmnd *ipr_cmd;
6706 struct ipr_hrr_queue *hrrq;
6707 int hrrq_id;
6708
6709 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6710 hrrq = &ioa_cfg->hrrq[hrrq_id];
6711
6712 qc->lldd_task = NULL;
6713 spin_lock(&hrrq->_lock);
6714 if (unlikely(hrrq->ioa_is_dead)) {
6715 spin_unlock(&hrrq->_lock);
6716 return 0;
6717 }
6718
6719 if (unlikely(!hrrq->allow_cmds)) {
6720 spin_unlock(&hrrq->_lock);
6721 return ATA_DEFER_LINK;
6722 }
6723
6724 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6725 if (ipr_cmd == NULL) {
6726 spin_unlock(&hrrq->_lock);
6727 return ATA_DEFER_LINK;
6728 }
6729
6730 qc->lldd_task = ipr_cmd;
6731 spin_unlock(&hrrq->_lock);
6732 return 0;
6733 }
6734
6735 /**
6736 * ipr_qc_issue - Issue a SATA qc to a device
6737 * @qc: queued command
6738 *
6739 * Return value:
6740 * 0 on success / AC_ERR_* value on failure
6741 **/
6742 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6743 {
6744 struct ata_port *ap = qc->ap;
6745 struct ipr_sata_port *sata_port = ap->private_data;
6746 struct ipr_resource_entry *res = sata_port->res;
6747 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6748 struct ipr_cmnd *ipr_cmd;
6749 struct ipr_ioarcb *ioarcb;
6750 struct ipr_ioarcb_ata_regs *regs;
6751
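/* ipr_qc_defer normally reserves the command block in qc->lldd_task;
 * if that did not happen, try to reserve one now.
 */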
6752 if (qc->lldd_task == NULL)
6753 ipr_qc_defer(qc);
6754
6755 ipr_cmd = qc->lldd_task;
6756 if (ipr_cmd == NULL)
6757 return AC_ERR_SYSTEM;
6758
6759 qc->lldd_task = NULL;
6760 spin_lock(&ipr_cmd->hrrq->_lock);
6761 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6762 ipr_cmd->hrrq->ioa_is_dead)) {
6763 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6764 spin_unlock(&ipr_cmd->hrrq->_lock);
6765 return AC_ERR_SYSTEM;
6766 }
6767
6768 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6769 ioarcb = &ipr_cmd->ioarcb;
6770
6771 if (ioa_cfg->sis64) {
6772 regs = &ipr_cmd->i.ata_ioadl.regs;
6773 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6774 } else
6775 regs = &ioarcb->u.add_data.u.regs;
6776
6777 memset(regs, 0, sizeof(*regs));
6778 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6779
6780 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6781 ipr_cmd->qc = qc;
6782 ipr_cmd->done = ipr_sata_done;
6783 ipr_cmd->ioarcb.res_handle = res->res_handle;
6784 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6785 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6786 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6787 ipr_cmd->dma_use_sg = qc->n_elem;
6788
6789 if (ioa_cfg->sis64)
6790 ipr_build_ata_ioadl64(ipr_cmd, qc);
6791 else
6792 ipr_build_ata_ioadl(ipr_cmd, qc);
6793
6794 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6795 ipr_copy_sata_tf(regs, &qc->tf);
6796 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6797 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6798
6799 switch (qc->tf.protocol) {
6800 case ATA_PROT_NODATA:
6801 case ATA_PROT_PIO:
6802 break;
6803
6804 case ATA_PROT_DMA:
6805 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6806 break;
6807
6808 case ATAPI_PROT_PIO:
6809 case ATAPI_PROT_NODATA:
6810 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6811 break;
6812
6813 case ATAPI_PROT_DMA:
6814 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6815 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6816 break;
6817
6818 default:
6819 WARN_ON(1);
6820 spin_unlock(&ipr_cmd->hrrq->_lock);
6821 return AC_ERR_INVALID;
6822 }
6823
6824 ipr_send_command(ipr_cmd);
6825 spin_unlock(&ipr_cmd->hrrq->_lock);
6826
6827 return 0;
6828 }
6829
6830 /**
6831 * ipr_qc_fill_rtf - Read result TF
6832 * @qc: ATA queued command
6833 *
6834 * Return value:
6835 * true
6836 **/
6837 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6838 {
6839 struct ipr_sata_port *sata_port = qc->ap->private_data;
6840 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6841 struct ata_taskfile *tf = &qc->result_tf;
6842
6843 tf->feature = g->error;
6844 tf->nsect = g->nsect;
6845 tf->lbal = g->lbal;
6846 tf->lbam = g->lbam;
6847 tf->lbah = g->lbah;
6848 tf->device = g->device;
6849 tf->command = g->status;
6850 tf->hob_nsect = g->hob_nsect;
6851 tf->hob_lbal = g->hob_lbal;
6852 tf->hob_lbam = g->hob_lbam;
6853 tf->hob_lbah = g->hob_lbah;
6854
6855 return true;
6856 }
6857
6858 static struct ata_port_operations ipr_sata_ops = {
6859 .phy_reset = ipr_ata_phy_reset,
6860 .hardreset = ipr_sata_reset,
6861 .post_internal_cmd = ipr_ata_post_internal,
6862 .qc_prep = ata_noop_qc_prep,
6863 .qc_defer = ipr_qc_defer,
6864 .qc_issue = ipr_qc_issue,
6865 .qc_fill_rtf = ipr_qc_fill_rtf,
6866 .port_start = ata_sas_port_start,
6867 .port_stop = ata_sas_port_stop
6868 };
6869
6870 static struct ata_port_info sata_port_info = {
6871 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6872 .pio_mask = ATA_PIO4_ONLY,
6873 .mwdma_mask = ATA_MWDMA2,
6874 .udma_mask = ATA_UDMA6,
6875 .port_ops = &ipr_sata_ops
6876 };
6877
6878 #ifdef CONFIG_PPC_PSERIES
6879 static const u16 ipr_blocked_processors[] = {
6880 PVR_NORTHSTAR,
6881 PVR_PULSAR,
6882 PVR_POWER4,
6883 PVR_ICESTAR,
6884 PVR_SSTAR,
6885 PVR_POWER4p,
6886 PVR_630,
6887 PVR_630p
6888 };
6889
6890 /**
6891 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6892 * @ioa_cfg: ioa cfg struct
6893 *
6894 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6895 * certain pSeries hardware. This function determines if the given
6896 * adapter is in one of these configurations or not.
6897 *
6898 * Return value:
6899 * 1 if adapter is not supported / 0 if adapter is supported
6900 **/
6901 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6902 {
6903 int i;
6904
6905 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6906 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6907 if (pvr_version_is(ipr_blocked_processors[i]))
6908 return 1;
6909 }
6910 }
6911 return 0;
6912 }
6913 #else
6914 #define ipr_invalid_adapter(ioa_cfg) 0
6915 #endif
6916
6917 /**
6918 * ipr_ioa_bringdown_done - IOA bring down completion.
6919 * @ipr_cmd: ipr command struct
6920 *
6921 * This function processes the completion of an adapter bring down.
6922 * It wakes any reset sleepers.
6923 *
6924 * Return value:
6925 * IPR_RC_JOB_RETURN
6926 **/
6927 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6928 {
6929 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6930 int i;
6931
6932 ENTER;
6933 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6934 ipr_trace;
6935 spin_unlock_irq(ioa_cfg->host->host_lock);
6936 scsi_unblock_requests(ioa_cfg->host);
6937 spin_lock_irq(ioa_cfg->host->host_lock);
6938 }
6939
6940 ioa_cfg->in_reset_reload = 0;
6941 ioa_cfg->reset_retries = 0;
6942 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6943 spin_lock(&ioa_cfg->hrrq[i]._lock);
6944 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6945 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6946 }
6947 wmb();
6948
6949 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6950 wake_up_all(&ioa_cfg->reset_wait_q);
6951 LEAVE;
6952
6953 return IPR_RC_JOB_RETURN;
6954 }
6955
6956 /**
6957 * ipr_ioa_reset_done - IOA reset completion.
6958 * @ipr_cmd: ipr command struct
6959 *
6960 * This function processes the completion of an adapter reset.
6961 * It schedules any necessary mid-layer add/removes and
6962 * wakes any reset sleepers.
6963 *
6964 * Return value:
6965 * IPR_RC_JOB_RETURN
6966 **/
6967 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6968 {
6969 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6970 struct ipr_resource_entry *res;
6971 struct ipr_hostrcb *hostrcb, *temp;
6972 int i = 0, j;
6973
6974 ENTER;
6975 ioa_cfg->in_reset_reload = 0;
6976 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6977 spin_lock(&ioa_cfg->hrrq[j]._lock);
6978 ioa_cfg->hrrq[j].allow_cmds = 1;
6979 spin_unlock(&ioa_cfg->hrrq[j]._lock);
6980 }
6981 wmb();
6982 ioa_cfg->reset_cmd = NULL;
6983 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6984
6985 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6986 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6987 ipr_trace;
6988 break;
6989 }
6990 }
6991 schedule_work(&ioa_cfg->work_q);
6992
6993 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6994 list_del(&hostrcb->queue);
6995 if (i++ < IPR_NUM_LOG_HCAMS)
6996 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6997 else
6998 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6999 }
7000
7001 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7002 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7003
7004 ioa_cfg->reset_retries = 0;
7005 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7006 wake_up_all(&ioa_cfg->reset_wait_q);
7007
7008 spin_unlock(ioa_cfg->host->host_lock);
7009 scsi_unblock_requests(ioa_cfg->host);
7010 spin_lock(ioa_cfg->host->host_lock);
7011
7012 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7013 scsi_block_requests(ioa_cfg->host);
7014
7015 LEAVE;
7016 return IPR_RC_JOB_RETURN;
7017 }
7018
7019 /**
7020 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7021 * @supported_dev: supported device struct
7022 * @vpids: vendor product id struct
7023 *
7024 * Return value:
7025 * none
7026 **/
7027 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7028 struct ipr_std_inq_vpids *vpids)
7029 {
7030 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7031 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7032 supported_dev->num_records = 1;
7033 supported_dev->data_length =
7034 cpu_to_be16(sizeof(struct ipr_supported_device));
7035 supported_dev->reserved = 0;
7036 }
7037
7038 /**
7039 * ipr_set_supported_devs - Send Set Supported Devices for a device
7040 * @ipr_cmd: ipr command struct
7041 *
7042 * This function sends a Set Supported Devices to the adapter
7043 *
7044 * Return value:
7045 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7046 **/
7047 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7048 {
7049 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7050 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7051 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7052 struct ipr_resource_entry *res = ipr_cmd->u.res;
7053
7054 ipr_cmd->job_step = ipr_ioa_reset_done;
7055
7056 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7057 if (!ipr_is_scsi_disk(res))
7058 continue;
7059
7060 ipr_cmd->u.res = res;
7061 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7062
7063 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7064 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7065 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7066
7067 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7068 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
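		/* CDB bytes 7-8 carry the big-endian transfer length of the
		 * supported device record written to the adapter.
		 */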
7069 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7070 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7071
7072 ipr_init_ioadl(ipr_cmd,
7073 ioa_cfg->vpd_cbs_dma +
7074 offsetof(struct ipr_misc_cbs, supp_dev),
7075 sizeof(struct ipr_supported_device),
7076 IPR_IOADL_FLAGS_WRITE_LAST);
7077
7078 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7079 IPR_SET_SUP_DEVICE_TIMEOUT);
7080
7081 if (!ioa_cfg->sis64)
7082 ipr_cmd->job_step = ipr_set_supported_devs;
7083 LEAVE;
7084 return IPR_RC_JOB_RETURN;
7085 }
7086
7087 LEAVE;
7088 return IPR_RC_JOB_CONTINUE;
7089 }
7090
7091 /**
7092 * ipr_get_mode_page - Locate specified mode page
7093 * @mode_pages: mode page buffer
7094 * @page_code: page code to find
7095 * @len: minimum required length for mode page
7096 *
7097 * Return value:
7098 * pointer to mode page / NULL on failure
7099 **/
7100 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7101 u32 page_code, u32 len)
7102 {
7103 struct ipr_mode_page_hdr *mode_hdr;
7104 u32 page_length;
7105 u32 length;
7106
7107 if (!mode_pages || (mode_pages->hdr.length == 0))
7108 return NULL;
7109
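	/* The mode data length field excludes itself, so (length + 1) is the
	 * total size of the returned mode data; subtract the 4-byte mode
	 * parameter header and any block descriptors to get the number of
	 * mode page bytes left to walk.
	 */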
7110 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7111 mode_hdr = (struct ipr_mode_page_hdr *)
7112 (mode_pages->data + mode_pages->hdr.block_desc_len);
7113
7114 while (length) {
7115 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7116 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7117 return mode_hdr;
7118 break;
7119 } else {
7120 page_length = (sizeof(struct ipr_mode_page_hdr) +
7121 mode_hdr->page_length);
7122 length -= page_length;
7123 mode_hdr = (struct ipr_mode_page_hdr *)
7124 ((unsigned long)mode_hdr + page_length);
7125 }
7126 }
7127 return NULL;
7128 }
7129
7130 /**
7131 * ipr_check_term_power - Check for term power errors
7132 * @ioa_cfg: ioa config struct
7133 * @mode_pages: IOAFP mode pages buffer
7134 *
7135 * Check the IOAFP's mode page 28 for term power errors
7136 *
7137 * Return value:
7138 * nothing
7139 **/
7140 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7141 struct ipr_mode_pages *mode_pages)
7142 {
7143 int i;
7144 int entry_length;
7145 struct ipr_dev_bus_entry *bus;
7146 struct ipr_mode_page28 *mode_page;
7147
7148 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7149 sizeof(struct ipr_mode_page28));
7150
7151 entry_length = mode_page->entry_length;
7152
7153 bus = mode_page->bus;
7154
7155 for (i = 0; i < mode_page->num_entries; i++) {
7156 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7157 dev_err(&ioa_cfg->pdev->dev,
7158 "Term power is absent on scsi bus %d\n",
7159 bus->res_addr.bus);
7160 }
7161
7162 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7163 }
7164 }
7165
7166 /**
7167 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7168 * @ioa_cfg: ioa config struct
7169 *
7170 * Looks through the config table checking for SES devices. If
7171 * the SES device is in the SES table indicating a maximum SCSI
7172 * bus speed, the speed is limited for the bus.
7173 *
7174 * Return value:
7175 * none
7176 **/
7177 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7178 {
7179 u32 max_xfer_rate;
7180 int i;
7181
7182 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7183 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7184 ioa_cfg->bus_attr[i].bus_width);
7185
7186 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7187 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7188 }
7189 }
7190
7191 /**
7192 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7193 * @ioa_cfg: ioa config struct
7194 * @mode_pages: mode page 28 buffer
7195 *
7196 * Updates mode page 28 based on driver configuration
7197 *
7198 * Return value:
7199 * none
7200 **/
7201 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7202 struct ipr_mode_pages *mode_pages)
7203 {
7204 int i, entry_length;
7205 struct ipr_dev_bus_entry *bus;
7206 struct ipr_bus_attributes *bus_attr;
7207 struct ipr_mode_page28 *mode_page;
7208
7209 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7210 sizeof(struct ipr_mode_page28));
7211
7212 entry_length = mode_page->entry_length;
7213
7214 /* Loop for each device bus entry */
7215 for (i = 0, bus = mode_page->bus;
7216 i < mode_page->num_entries;
7217 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7218 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7219 dev_err(&ioa_cfg->pdev->dev,
7220 "Invalid resource address reported: 0x%08X\n",
7221 IPR_GET_PHYS_LOC(bus->res_addr));
7222 continue;
7223 }
7224
7225 bus_attr = &ioa_cfg->bus_attr[i];
7226 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7227 bus->bus_width = bus_attr->bus_width;
7228 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7229 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7230 if (bus_attr->qas_enabled)
7231 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7232 else
7233 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7234 }
7235 }
7236
7237 /**
7238 * ipr_build_mode_select - Build a mode select command
7239 * @ipr_cmd: ipr command struct
7240 * @res_handle: resource handle to send command to
7241 * @parm: Byte 2 of the Mode Select command
7242 * @dma_addr: DMA buffer address
7243 * @xfer_len: data transfer length
7244 *
7245 * Return value:
7246 * none
7247 **/
7248 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7249 __be32 res_handle, u8 parm,
7250 dma_addr_t dma_addr, u8 xfer_len)
7251 {
7252 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7253
7254 ioarcb->res_handle = res_handle;
7255 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7256 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7257 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7258 ioarcb->cmd_pkt.cdb[1] = parm;
7259 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7260
7261 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7262 }
7263
7264 /**
7265 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7266 * @ipr_cmd: ipr command struct
7267 *
7268 * This function sets up the SCSI bus attributes and sends
7269 * a Mode Select for Page 28 to activate them.
7270 *
7271 * Return value:
7272 * IPR_RC_JOB_RETURN
7273 **/
7274 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7275 {
7276 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7277 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7278 int length;
7279
7280 ENTER;
7281 ipr_scsi_bus_speed_limit(ioa_cfg);
7282 ipr_check_term_power(ioa_cfg, mode_pages);
7283 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7284 length = mode_pages->hdr.length + 1;
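	/* The mode data length field is reserved in MODE SELECT parameter
	 * data, so clear it before sending the pages back to the adapter.
	 */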
7285 mode_pages->hdr.length = 0;
7286
7287 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7288 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7289 length);
7290
7291 ipr_cmd->job_step = ipr_set_supported_devs;
7292 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7293 struct ipr_resource_entry, queue);
7294 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7295
7296 LEAVE;
7297 return IPR_RC_JOB_RETURN;
7298 }
7299
7300 /**
7301 * ipr_build_mode_sense - Builds a mode sense command
7302 * @ipr_cmd: ipr command struct
7303 * @res_handle: resource handle to send command to
7304 * @parm: Byte 2 of mode sense command
7305 * @dma_addr: DMA address of mode sense buffer
7306 * @xfer_len: Size of DMA buffer
7307 *
7308 * Return value:
7309 * none
7310 **/
7311 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7312 __be32 res_handle,
7313 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7314 {
7315 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7316
7317 ioarcb->res_handle = res_handle;
7318 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7319 ioarcb->cmd_pkt.cdb[2] = parm;
7320 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7321 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7322
7323 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7324 }
7325
7326 /**
7327 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7328 * @ipr_cmd: ipr command struct
7329 *
7330 * This function handles the failure of an IOA bringup command.
7331 *
7332 * Return value:
7333 * IPR_RC_JOB_RETURN
7334 **/
7335 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7336 {
7337 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7338 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7339
7340 dev_err(&ioa_cfg->pdev->dev,
7341 "0x%02X failed with IOASC: 0x%08X\n",
7342 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7343
7344 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7345 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7346 return IPR_RC_JOB_RETURN;
7347 }
7348
7349 /**
7350 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7351 * @ipr_cmd: ipr command struct
7352 *
7353 * This function handles the failure of a Mode Sense to the IOAFP.
7354 * Some adapters do not handle all mode pages.
7355 *
7356 * Return value:
7357 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7358 **/
7359 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7360 {
7361 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7362 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7363
7364 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7365 ipr_cmd->job_step = ipr_set_supported_devs;
7366 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7367 struct ipr_resource_entry, queue);
7368 return IPR_RC_JOB_CONTINUE;
7369 }
7370
7371 return ipr_reset_cmd_failed(ipr_cmd);
7372 }
7373
7374 /**
7375 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7376 * @ipr_cmd: ipr command struct
7377 *
7378 * This function sends a Page 28 mode sense to the IOA to
7379 * retrieve SCSI bus attributes.
7380 *
7381 * Return value:
7382 * IPR_RC_JOB_RETURN
7383 **/
7384 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7385 {
7386 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7387
7388 ENTER;
7389 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7390 0x28, ioa_cfg->vpd_cbs_dma +
7391 offsetof(struct ipr_misc_cbs, mode_pages),
7392 sizeof(struct ipr_mode_pages));
7393
7394 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7395 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7396
7397 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7398
7399 LEAVE;
7400 return IPR_RC_JOB_RETURN;
7401 }
7402
7403 /**
7404 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7405 * @ipr_cmd: ipr command struct
7406 *
7407 * This function enables dual IOA RAID support if possible.
7408 *
7409 * Return value:
7410 * IPR_RC_JOB_RETURN
7411 **/
7412 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7413 {
7414 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7415 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7416 struct ipr_mode_page24 *mode_page;
7417 int length;
7418
7419 ENTER;
7420 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7421 sizeof(struct ipr_mode_page24));
7422
7423 if (mode_page)
7424 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7425
7426 length = mode_pages->hdr.length + 1;
7427 mode_pages->hdr.length = 0;
7428
7429 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7430 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7431 length);
7432
7433 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7434 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7435
7436 LEAVE;
7437 return IPR_RC_JOB_RETURN;
7438 }
7439
7440 /**
7441 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7442 * @ipr_cmd: ipr command struct
7443 *
7444 * This function handles the failure of a Mode Sense to the IOAFP.
7445 * Some adapters do not handle all mode pages.
7446 *
7447 * Return value:
7448 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7449 **/
7450 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7451 {
7452 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7453
7454 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7455 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7456 return IPR_RC_JOB_CONTINUE;
7457 }
7458
7459 return ipr_reset_cmd_failed(ipr_cmd);
7460 }
7461
7462 /**
7463 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7464 * @ipr_cmd: ipr command struct
7465 *
7466 * This function sends a mode sense to the IOA to retrieve
7467 * the IOA Advanced Function Control mode page.
7468 *
7469 * Return value:
7470 * IPR_RC_JOB_RETURN
7471 **/
7472 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7473 {
7474 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7475
7476 ENTER;
7477 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7478 0x24, ioa_cfg->vpd_cbs_dma +
7479 offsetof(struct ipr_misc_cbs, mode_pages),
7480 sizeof(struct ipr_mode_pages));
7481
7482 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7483 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7484
7485 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7486
7487 LEAVE;
7488 return IPR_RC_JOB_RETURN;
7489 }
7490
7491 /**
7492 * ipr_init_res_table - Initialize the resource table
7493 * @ipr_cmd: ipr command struct
7494 *
7495 * This function looks through the existing resource table, comparing
7496 * it with the config table. This function will take care of old/new
7497 * devices and schedule adding/removing them from the mid-layer
7498 * as appropriate.
7499 *
7500 * Return value:
7501 * IPR_RC_JOB_CONTINUE
7502 **/
7503 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7504 {
7505 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7506 struct ipr_resource_entry *res, *temp;
7507 struct ipr_config_table_entry_wrapper cfgtew;
7508 int entries, found, flag, i;
7509 LIST_HEAD(old_res);
7510
7511 ENTER;
7512 if (ioa_cfg->sis64)
7513 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7514 else
7515 flag = ioa_cfg->u.cfg_table->hdr.flags;
7516
7517 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7518 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7519
7520 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7521 list_move_tail(&res->queue, &old_res);
7522
7523 if (ioa_cfg->sis64)
7524 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7525 else
7526 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7527
7528 for (i = 0; i < entries; i++) {
7529 if (ioa_cfg->sis64)
7530 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7531 else
7532 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7533 found = 0;
7534
7535 list_for_each_entry_safe(res, temp, &old_res, queue) {
7536 if (ipr_is_same_device(res, &cfgtew)) {
7537 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7538 found = 1;
7539 break;
7540 }
7541 }
7542
7543 if (!found) {
7544 if (list_empty(&ioa_cfg->free_res_q)) {
7545 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7546 break;
7547 }
7548
7549 found = 1;
7550 res = list_entry(ioa_cfg->free_res_q.next,
7551 struct ipr_resource_entry, queue);
7552 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7553 ipr_init_res_entry(res, &cfgtew);
7554 res->add_to_ml = 1;
7555 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7556 res->sdev->allow_restart = 1;
7557
7558 if (found)
7559 ipr_update_res_entry(res, &cfgtew);
7560 }
7561
7562 list_for_each_entry_safe(res, temp, &old_res, queue) {
7563 if (res->sdev) {
7564 res->del_from_ml = 1;
7565 res->res_handle = IPR_INVALID_RES_HANDLE;
7566 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7567 }
7568 }
7569
7570 list_for_each_entry_safe(res, temp, &old_res, queue) {
7571 ipr_clear_res_target(res);
7572 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7573 }
7574
7575 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7576 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7577 else
7578 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7579
7580 LEAVE;
7581 return IPR_RC_JOB_CONTINUE;
7582 }
7583
7584 /**
7585 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7586 * @ipr_cmd: ipr command struct
7587 *
7588 * This function sends a Query IOA Configuration command
7589 * to the adapter to retrieve the IOA configuration table.
7590 *
7591 * Return value:
7592 * IPR_RC_JOB_RETURN
7593 **/
7594 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7595 {
7596 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7597 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7598 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7599 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7600
7601 ENTER;
7602 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7603 ioa_cfg->dual_raid = 1;
7604 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7605 ucode_vpd->major_release, ucode_vpd->card_type,
7606 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7607 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7608 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7609
7610 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
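	/* CDB bytes 6-8 hold the 24-bit allocation length for the returned
	 * config table.
	 */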
7611 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7612 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7613 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7614
7615 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7616 IPR_IOADL_FLAGS_READ_LAST);
7617
7618 ipr_cmd->job_step = ipr_init_res_table;
7619
7620 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7621
7622 LEAVE;
7623 return IPR_RC_JOB_RETURN;
7624 }
7625
7626 /**
7627 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7628 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1, e.g. the EVPD bit)
 * @page: inquiry page code
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
7629 *
7630 * This utility function sends an inquiry to the adapter.
7631 *
7632 * Return value:
7633 * none
7634 **/
7635 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7636 dma_addr_t dma_addr, u8 xfer_len)
7637 {
7638 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7639
7640 ENTER;
7641 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7642 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7643
7644 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7645 ioarcb->cmd_pkt.cdb[1] = flags;
7646 ioarcb->cmd_pkt.cdb[2] = page;
7647 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7648
7649 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7650
7651 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7652 LEAVE;
7653 }
7654
7655 /**
7656 * ipr_inquiry_page_supported - Is the given inquiry page supported
7657 * @page0: inquiry page 0 buffer
7658 * @page: page code.
7659 *
7660 * This function determines if the specified inquiry page is supported.
7661 *
7662 * Return value:
7663 * 1 if page is supported / 0 if not
7664 **/
7665 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7666 {
7667 int i;
7668
7669 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7670 if (page0->page[i] == page)
7671 return 1;
7672
7673 return 0;
7674 }
7675
7676 /**
7677 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7678 * @ipr_cmd: ipr command struct
7679 *
7680 * This function sends a Page 0xD0 inquiry to the adapter
7681 * to retrieve adapter capabilities.
7682 *
7683 * Return value:
7684 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7685 **/
7686 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7687 {
7688 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7689 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7690 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7691
7692 ENTER;
7693 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7694 memset(cap, 0, sizeof(*cap));
7695
7696 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7697 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7698 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7699 sizeof(struct ipr_inquiry_cap));
7700 return IPR_RC_JOB_RETURN;
7701 }
7702
7703 LEAVE;
7704 return IPR_RC_JOB_CONTINUE;
7705 }
7706
7707 /**
7708 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7709 * @ipr_cmd: ipr command struct
7710 *
7711 * This function sends a Page 3 inquiry to the adapter
7712 * to retrieve software VPD information.
7713 *
7714 * Return value:
7715 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7716 **/
7717 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7718 {
7719 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7720
7721 ENTER;
7722
7723 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7724
7725 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7726 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7727 sizeof(struct ipr_inquiry_page3));
7728
7729 LEAVE;
7730 return IPR_RC_JOB_RETURN;
7731 }
7732
7733 /**
7734 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7735 * @ipr_cmd: ipr command struct
7736 *
7737 * This function sends a Page 0 inquiry to the adapter
7738 * to retrieve supported inquiry pages.
7739 *
7740 * Return value:
7741 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7742 **/
7743 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7744 {
7745 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7746 char type[5];
7747
7748 ENTER;
7749
7750 /* Grab the type out of the VPD and store it away */
7751 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7752 type[4] = '\0';
7753 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7754
7755 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7756
7757 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7758 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7759 sizeof(struct ipr_inquiry_page0));
7760
7761 LEAVE;
7762 return IPR_RC_JOB_RETURN;
7763 }
7764
7765 /**
7766 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7767 * @ipr_cmd: ipr command struct
7768 *
7769 * This function sends a standard inquiry to the adapter.
7770 *
7771 * Return value:
7772 * IPR_RC_JOB_RETURN
7773 **/
7774 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7775 {
7776 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7777
7778 ENTER;
7779 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7780
7781 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7782 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7783 sizeof(struct ipr_ioa_vpd));
7784
7785 LEAVE;
7786 return IPR_RC_JOB_RETURN;
7787 }
7788
7789 /**
7790 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7791 * @ipr_cmd: ipr command struct
7792 *
7793 * This function sends an Identify Host Request Response Queue
7794 * command to establish the HRRQ with the adapter.
7795 *
7796 * Return value:
7797 * IPR_RC_JOB_RETURN
7798 **/
7799 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7800 {
7801 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7802 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7803 struct ipr_hrr_queue *hrrq;
7804
7805 ENTER;
7806 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7807 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7808
7809 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7810 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7811
7812 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7813 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7814
7815 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7816 if (ioa_cfg->sis64)
7817 ioarcb->cmd_pkt.cdb[1] = 0x1;
7818
7819 if (ioa_cfg->nvectors == 1)
7820 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7821 else
7822 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7823
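		/* CDB bytes 2-5 carry the low 32 bits of the HRRQ DMA address
		 * and bytes 7-8 the queue size in bytes; on SIS-64 adapters
		 * bytes 10-13 add the upper 32 bits of the address, and the
		 * HRRQ index is included when multiple queues are enabled.
		 */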
7824 ioarcb->cmd_pkt.cdb[2] =
7825 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7826 ioarcb->cmd_pkt.cdb[3] =
7827 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7828 ioarcb->cmd_pkt.cdb[4] =
7829 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7830 ioarcb->cmd_pkt.cdb[5] =
7831 ((u64) hrrq->host_rrq_dma) & 0xff;
7832 ioarcb->cmd_pkt.cdb[7] =
7833 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7834 ioarcb->cmd_pkt.cdb[8] =
7835 (sizeof(u32) * hrrq->size) & 0xff;
7836
7837 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7838 ioarcb->cmd_pkt.cdb[9] =
7839 ioa_cfg->identify_hrrq_index;
7840
7841 if (ioa_cfg->sis64) {
7842 ioarcb->cmd_pkt.cdb[10] =
7843 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7844 ioarcb->cmd_pkt.cdb[11] =
7845 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7846 ioarcb->cmd_pkt.cdb[12] =
7847 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7848 ioarcb->cmd_pkt.cdb[13] =
7849 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7850 }
7851
7852 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7853 ioarcb->cmd_pkt.cdb[14] =
7854 ioa_cfg->identify_hrrq_index;
7855
7856 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7857 IPR_INTERNAL_TIMEOUT);
7858
7859 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7860 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7861
7862 LEAVE;
7863 return IPR_RC_JOB_RETURN;
7864 }
7865
7866 LEAVE;
7867 return IPR_RC_JOB_CONTINUE;
7868 }
7869
7870 /**
7871 * ipr_reset_timer_done - Adapter reset timer function
7872 * @ipr_cmd: ipr command struct
7873 *
7874 * Description: This function is used in adapter reset processing
7875 * for timing events. If the reset_cmd pointer in the IOA
7876 * config struct is not this adapter's we are doing nested
7877 * resets and fail_all_ops will take care of freeing the
7878 * command block.
7879 *
7880 * Return value:
7881 * none
7882 **/
7883 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7884 {
7885 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7886 unsigned long lock_flags = 0;
7887
7888 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7889
7890 if (ioa_cfg->reset_cmd == ipr_cmd) {
7891 list_del(&ipr_cmd->queue);
7892 ipr_cmd->done(ipr_cmd);
7893 }
7894
7895 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7896 }
7897
7898 /**
7899 * ipr_reset_start_timer - Start a timer for adapter reset job
7900 * @ipr_cmd: ipr command struct
7901 * @timeout: timeout value
7902 *
7903 * Description: This function is used in adapter reset processing
7904 * for timing events. If the reset_cmd pointer in the IOA
7905 * config struct is not this adapter's we are doing nested
7906 * resets and fail_all_ops will take care of freeing the
7907 * command block.
7908 *
7909 * Return value:
7910 * none
7911 **/
7912 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7913 unsigned long timeout)
7914 {
7915
7916 ENTER;
7917 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7918 ipr_cmd->done = ipr_reset_ioa_job;
7919
7920 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7921 ipr_cmd->timer.expires = jiffies + timeout;
7922 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7923 add_timer(&ipr_cmd->timer);
7924 }
7925
7926 /**
7927 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7928 * @ioa_cfg: ioa cfg struct
7929 *
7930 * Return value:
7931 * nothing
7932 **/
7933 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7934 {
7935 struct ipr_hrr_queue *hrrq;
7936
7937 for_each_hrrq(hrrq, ioa_cfg) {
7938 spin_lock(&hrrq->_lock);
7939 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7940
7941 /* Initialize Host RRQ pointers */
7942 hrrq->hrrq_start = hrrq->host_rrq;
7943 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7944 hrrq->hrrq_curr = hrrq->hrrq_start;
7945 hrrq->toggle_bit = 1;
7946 spin_unlock(&hrrq->_lock);
7947 }
7948 wmb();
7949
7950 ioa_cfg->identify_hrrq_index = 0;
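	/* HRRQ 0 (IPR_INIT_HRRQ) is used for adapter-initiated traffic, so
	 * when multiple queues exist normal I/O starts dispatching at queue 1.
	 */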
7951 if (ioa_cfg->hrrq_num == 1)
7952 atomic_set(&ioa_cfg->hrrq_index, 0);
7953 else
7954 atomic_set(&ioa_cfg->hrrq_index, 1);
7955
7956 /* Zero out config table */
7957 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7958 }
7959
7960 /**
7961 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7962 * @ipr_cmd: ipr command struct
7963 *
7964 * Return value:
7965 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7966 **/
7967 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7968 {
7969 unsigned long stage, stage_time;
7970 u32 feedback;
7971 volatile u32 int_reg;
7972 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7973 u64 maskval = 0;
7974
7975 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7976 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7977 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7978
7979 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7980
7981 /* sanity check the stage_time value */
7982 if (stage_time == 0)
7983 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7984 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7985 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7986 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7987 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7988
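	/* An unknown stage means the adapter has not yet reported IPL
	 * progress: mask the stage-change interrupt and wait for the full
	 * transition timeout. If the adapter already signals the transition
	 * to operational, mask the remaining init interrupts and continue
	 * straight to identifying the host RRQ.
	 */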
7989 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7990 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7991 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7992 stage_time = ioa_cfg->transop_timeout;
7993 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7994 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7995 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7996 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7997 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7998 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7999 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8000 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8001 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8002 return IPR_RC_JOB_CONTINUE;
8003 }
8004 }
8005
8006 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8007 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8008 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8009 ipr_cmd->done = ipr_reset_ioa_job;
8010 add_timer(&ipr_cmd->timer);
8011
8012 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8013
8014 return IPR_RC_JOB_RETURN;
8015 }
8016
8017 /**
8018 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8019 * @ipr_cmd: ipr command struct
8020 *
8021 * This function reinitializes some control blocks and
8022 * enables destructive diagnostics on the adapter.
8023 *
8024 * Return value:
8025 * IPR_RC_JOB_RETURN
8026 **/
8027 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8028 {
8029 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8030 volatile u32 int_reg;
8031 volatile u64 maskval;
8032 int i;
8033
8034 ENTER;
8035 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8036 ipr_init_ioa_mem(ioa_cfg);
8037
8038 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8039 spin_lock(&ioa_cfg->hrrq[i]._lock);
8040 ioa_cfg->hrrq[i].allow_interrupts = 1;
8041 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8042 }
8043 wmb();
8044 if (ioa_cfg->sis64) {
8045 /* Set the adapter to the correct endian mode. */
8046 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8047 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8048 }
8049
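	/* If the adapter has already signalled its transition to operational
	 * state, skip the destructive diagnostics doorbell and simply unmask
	 * the error and HRRQ interrupts.
	 */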
8050 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8051
8052 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8053 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8054 ioa_cfg->regs.clr_interrupt_mask_reg32);
8055 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8056 return IPR_RC_JOB_CONTINUE;
8057 }
8058
8059 /* Enable destructive diagnostics on IOA */
8060 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8061
8062 if (ioa_cfg->sis64) {
8063 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8064 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8065 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8066 } else
8067 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8068
8069 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8070
8071 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8072
8073 if (ioa_cfg->sis64) {
8074 ipr_cmd->job_step = ipr_reset_next_stage;
8075 return IPR_RC_JOB_CONTINUE;
8076 }
8077
8078 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8079 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8080 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8081 ipr_cmd->done = ipr_reset_ioa_job;
8082 add_timer(&ipr_cmd->timer);
8083 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8084
8085 LEAVE;
8086 return IPR_RC_JOB_RETURN;
8087 }
8088
8089 /**
8090 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8091 * @ipr_cmd: ipr command struct
8092 *
8093 * This function is invoked when an adapter dump has run out
8094 * of processing time.
8095 *
8096 * Return value:
8097 * IPR_RC_JOB_CONTINUE
8098 **/
8099 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8100 {
8101 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8102
8103 if (ioa_cfg->sdt_state == GET_DUMP)
8104 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8105 else if (ioa_cfg->sdt_state == READ_DUMP)
8106 ioa_cfg->sdt_state = ABORT_DUMP;
8107
8108 ioa_cfg->dump_timeout = 1;
8109 ipr_cmd->job_step = ipr_reset_alert;
8110
8111 return IPR_RC_JOB_CONTINUE;
8112 }
8113
8114 /**
8115 * ipr_unit_check_no_data - Log a unit check/no data error log
8116 * @ioa_cfg: ioa config struct
8117 *
8118 * Logs an error indicating the adapter unit checked, but for some
8119 * reason, we were unable to fetch the unit check buffer.
8120 *
8121 * Return value:
8122 * nothing
8123 **/
8124 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8125 {
8126 ioa_cfg->errors_logged++;
8127 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8128 }
8129
8130 /**
8131 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8132 * @ioa_cfg: ioa config struct
8133 *
8134 * Fetches the unit check buffer from the adapter by clocking the data
8135 * through the mailbox register.
8136 *
8137 * Return value:
8138 * nothing
8139 **/
8140 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8141 {
8142 unsigned long mailbox;
8143 struct ipr_hostrcb *hostrcb;
8144 struct ipr_uc_sdt sdt;
8145 int rc, length;
8146 u32 ioasc;
8147
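	/* The mailbox register holds the adapter address of the smart dump
	 * table (SDT) that describes where the unit check buffer lives.
	 */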
8148 mailbox = readl(ioa_cfg->ioa_mailbox);
8149
8150 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8151 ipr_unit_check_no_data(ioa_cfg);
8152 return;
8153 }
8154
8155 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8156 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8157 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8158
8159 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8160 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8161 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8162 ipr_unit_check_no_data(ioa_cfg);
8163 return;
8164 }
8165
8166 /* Find length of the first sdt entry (UC buffer) */
8167 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8168 length = be32_to_cpu(sdt.entry[0].end_token);
8169 else
8170 length = (be32_to_cpu(sdt.entry[0].end_token) -
8171 be32_to_cpu(sdt.entry[0].start_token)) &
8172 IPR_FMT2_MBX_ADDR_MASK;
8173
8174 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8175 struct ipr_hostrcb, queue);
8176 list_del(&hostrcb->queue);
8177 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8178
8179 rc = ipr_get_ldump_data_section(ioa_cfg,
8180 be32_to_cpu(sdt.entry[0].start_token),
8181 (__be32 *)&hostrcb->hcam,
8182 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8183
8184 if (!rc) {
8185 ipr_handle_log_data(ioa_cfg, hostrcb);
8186 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8187 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8188 ioa_cfg->sdt_state == GET_DUMP)
8189 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8190 } else
8191 ipr_unit_check_no_data(ioa_cfg);
8192
8193 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8194 }
8195
8196 /**
8197 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8198 * @ipr_cmd: ipr command struct
8199 *
8200 * Description: This function will call to get the unit check buffer.
8201 *
8202 * Return value:
8203 * IPR_RC_JOB_RETURN
8204 **/
8205 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8206 {
8207 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8208
8209 ENTER;
8210 ioa_cfg->ioa_unit_checked = 0;
8211 ipr_get_unit_check_buffer(ioa_cfg);
8212 ipr_cmd->job_step = ipr_reset_alert;
8213 ipr_reset_start_timer(ipr_cmd, 0);
8214
8215 LEAVE;
8216 return IPR_RC_JOB_RETURN;
8217 }
8218
8219 /**
8220 * ipr_reset_restore_cfg_space - Restore PCI config space.
8221 * @ipr_cmd: ipr command struct
8222 *
8223 * Description: This function restores the saved PCI config space of
8224 * the adapter, fails all outstanding ops back to the callers, and
8225 * fetches the dump/unit check if applicable to this reset.
8226 *
8227 * Return value:
8228 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8229 **/
8230 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8231 {
8232 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8233 u32 int_reg;
8234
8235 ENTER;
8236 ioa_cfg->pdev->state_saved = true;
8237 pci_restore_state(ioa_cfg->pdev);
8238
8239 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8240 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8241 return IPR_RC_JOB_CONTINUE;
8242 }
8243
8244 ipr_fail_all_ops(ioa_cfg);
8245
8246 if (ioa_cfg->sis64) {
8247 /* Set the adapter to the correct endian mode. */
8248 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8249 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8250 }
8251
8252 if (ioa_cfg->ioa_unit_checked) {
8253 if (ioa_cfg->sis64) {
8254 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8255 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8256 return IPR_RC_JOB_RETURN;
8257 } else {
8258 ioa_cfg->ioa_unit_checked = 0;
8259 ipr_get_unit_check_buffer(ioa_cfg);
8260 ipr_cmd->job_step = ipr_reset_alert;
8261 ipr_reset_start_timer(ipr_cmd, 0);
8262 return IPR_RC_JOB_RETURN;
8263 }
8264 }
8265
8266 if (ioa_cfg->in_ioa_bringdown) {
8267 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8268 } else {
8269 ipr_cmd->job_step = ipr_reset_enable_ioa;
8270
8271 if (GET_DUMP == ioa_cfg->sdt_state) {
8272 ioa_cfg->sdt_state = READ_DUMP;
8273 ioa_cfg->dump_timeout = 0;
8274 if (ioa_cfg->sis64)
8275 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8276 else
8277 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8278 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8279 schedule_work(&ioa_cfg->work_q);
8280 return IPR_RC_JOB_RETURN;
8281 }
8282 }
8283
8284 LEAVE;
8285 return IPR_RC_JOB_CONTINUE;
8286 }
8287
8288 /**
8289 * ipr_reset_bist_done - BIST has completed on the adapter.
8290 * @ipr_cmd: ipr command struct
8291 *
8292 * Description: Unblock config space and resume the reset process.
8293 *
8294 * Return value:
8295 * IPR_RC_JOB_CONTINUE
8296 **/
8297 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8298 {
8299 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8300
8301 ENTER;
8302 if (ioa_cfg->cfg_locked)
8303 pci_cfg_access_unlock(ioa_cfg->pdev);
8304 ioa_cfg->cfg_locked = 0;
8305 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8306 LEAVE;
8307 return IPR_RC_JOB_CONTINUE;
8308 }
8309
8310 /**
8311 * ipr_reset_start_bist - Run BIST on the adapter.
8312 * @ipr_cmd: ipr command struct
8313 *
8314 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8315 *
8316 * Return value:
8317 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8318 **/
8319 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8320 {
8321 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8322 int rc = PCIBIOS_SUCCESSFUL;
8323
8324 ENTER;
8325 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8326 writel(IPR_UPROCI_SIS64_START_BIST,
8327 ioa_cfg->regs.set_uproc_interrupt_reg32);
8328 else
8329 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8330
8331 if (rc == PCIBIOS_SUCCESSFUL) {
8332 ipr_cmd->job_step = ipr_reset_bist_done;
8333 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8334 rc = IPR_RC_JOB_RETURN;
8335 } else {
8336 if (ioa_cfg->cfg_locked)
8337 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8338 ioa_cfg->cfg_locked = 0;
8339 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8340 rc = IPR_RC_JOB_CONTINUE;
8341 }
8342
8343 LEAVE;
8344 return rc;
8345 }
8346
8347 /**
8348 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8349 * @ipr_cmd: ipr command struct
8350 *
8351 * Description: This clears PCI reset to the adapter and delays two seconds.
8352 *
8353 * Return value:
8354 * IPR_RC_JOB_RETURN
8355 **/
8356 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8357 {
8358 ENTER;
8359 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
8360 ipr_cmd->job_step = ipr_reset_bist_done;
8361 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8362 LEAVE;
8363 return IPR_RC_JOB_RETURN;
8364 }
8365
8366 /**
8367 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8368 * @ipr_cmd: ipr command struct
8369 *
8370 * Description: This asserts PCI reset to the adapter.
8371 *
8372 * Return value:
8373 * IPR_RC_JOB_RETURN
8374 **/
8375 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8376 {
8377 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8378 struct pci_dev *pdev = ioa_cfg->pdev;
8379
8380 ENTER;
8381 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8382 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8383 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
8384 LEAVE;
8385 return IPR_RC_JOB_RETURN;
8386 }
8387
8388 /**
8389 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8390 * @ipr_cmd: ipr command struct
8391 *
8392 * Description: This attempts to block config access to the IOA.
8393 *
8394 * Return value:
8395 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8396 **/
8397 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8398 {
8399 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8400 int rc = IPR_RC_JOB_CONTINUE;
8401
8402 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8403 ioa_cfg->cfg_locked = 1;
8404 ipr_cmd->job_step = ioa_cfg->reset;
8405 } else {
8406 if (ipr_cmd->u.time_left) {
8407 rc = IPR_RC_JOB_RETURN;
8408 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8409 ipr_reset_start_timer(ipr_cmd,
8410 IPR_CHECK_FOR_RESET_TIMEOUT);
8411 } else {
8412 ipr_cmd->job_step = ioa_cfg->reset;
8413 dev_err(&ioa_cfg->pdev->dev,
8414 "Timed out waiting to lock config access. Resetting anyway.\n");
8415 }
8416 }
8417
8418 return rc;
8419 }
8420
8421 /**
8422 * ipr_reset_block_config_access - Block config access to the IOA
8423 * @ipr_cmd: ipr command struct
8424 *
8425 * Description: This attempts to block config access to the IOA
8426 *
8427 * Return value:
8428 * IPR_RC_JOB_CONTINUE
8429 **/
8430 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8431 {
8432 ipr_cmd->ioa_cfg->cfg_locked = 0;
8433 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8434 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8435 return IPR_RC_JOB_CONTINUE;
8436 }
8437
8438 /**
8439 * ipr_reset_allowed - Query whether or not IOA can be reset
8440 * @ioa_cfg: ioa config struct
8441 *
8442 * Return value:
8443 * 0 if reset not allowed / non-zero if reset is allowed
8444 **/
8445 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8446 {
8447 volatile u32 temp_reg;
8448
8449 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8450 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8451 }
8452
8453 /**
8454 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8455 * @ipr_cmd: ipr command struct
8456 *
8457 * Description: This function waits for adapter permission to run BIST,
8458 * then runs BIST. If the adapter does not give permission after a
8459 * reasonable time, we will reset the adapter anyway. The impact of
8460 * resetting the adapter without warning the adapter is the risk of
8461 * losing the persistent error log on the adapter. If the adapter is
8462 * reset while it is writing to the flash on the adapter, the flash
8463 * segment will have bad ECC and be zeroed.
8464 *
8465 * Return value:
8466 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8467 **/
8468 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8469 {
8470 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8471 int rc = IPR_RC_JOB_RETURN;
8472
8473 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8474 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8475 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8476 } else {
8477 ipr_cmd->job_step = ipr_reset_block_config_access;
8478 rc = IPR_RC_JOB_CONTINUE;
8479 }
8480
8481 return rc;
8482 }
8483
8484 /**
8485 * ipr_reset_alert - Alert the adapter of a pending reset
8486 * @ipr_cmd: ipr command struct
8487 *
8488 * Description: This function alerts the adapter that it will be reset.
8489 * If memory space is not currently enabled, proceed directly
8490 * to running BIST on the adapter. The timer must always be started
8491 * so we guarantee we do not run BIST from ipr_isr.
8492 *
8493 * Return value:
8494 * IPR_RC_JOB_RETURN
8495 **/
8496 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8497 {
8498 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8499 u16 cmd_reg;
8500 int rc;
8501
8502 ENTER;
8503 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8504
8505 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8506 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8507 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8508 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8509 } else {
8510 ipr_cmd->job_step = ipr_reset_block_config_access;
8511 }
8512
8513 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8514 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8515
8516 LEAVE;
8517 return IPR_RC_JOB_RETURN;
8518 }
8519
8520 /**
8521 * ipr_reset_ucode_download_done - Microcode download completion
8522 * @ipr_cmd: ipr command struct
8523 *
8524 * Description: This function unmaps the microcode download buffer.
8525 *
8526 * Return value:
8527 * IPR_RC_JOB_CONTINUE
8528 **/
8529 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8530 {
8531 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8532 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8533
8534 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
8535 sglist->num_sg, DMA_TO_DEVICE);
8536
8537 ipr_cmd->job_step = ipr_reset_alert;
8538 return IPR_RC_JOB_CONTINUE;
8539 }
8540
8541 /**
8542 * ipr_reset_ucode_download - Download microcode to the adapter
8543 * @ipr_cmd: ipr command struct
8544 *
8545 * Description: This function checks to see if there is microcode
8546 * to download to the adapter. If there is, a download is performed.
8547 *
8548 * Return value:
8549 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8550 **/
8551 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8552 {
8553 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8554 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8555
8556 ENTER;
8557 ipr_cmd->job_step = ipr_reset_alert;
8558
8559 if (!sglist)
8560 return IPR_RC_JOB_CONTINUE;
8561
8562 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8563 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8564 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8565 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
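	/* WRITE BUFFER CDB bytes 6-8 carry the 24-bit length of the
	 * microcode image being downloaded.
	 */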
8566 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8567 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8568 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8569
8570 if (ioa_cfg->sis64)
8571 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8572 else
8573 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8574 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8575
8576 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8577 IPR_WRITE_BUFFER_TIMEOUT);
8578
8579 LEAVE;
8580 return IPR_RC_JOB_RETURN;
8581 }
8582
8583 /**
8584 * ipr_reset_shutdown_ioa - Shutdown the adapter
8585 * @ipr_cmd: ipr command struct
8586 *
8587 * Description: This function issues an adapter shutdown of the
8588 * specified type to the specified adapter as part of the
8589 * adapter reset job.
8590 *
8591 * Return value:
8592 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8593 **/
8594 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8595 {
8596 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8597 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8598 unsigned long timeout;
8599 int rc = IPR_RC_JOB_CONTINUE;
8600
8601 ENTER;
8602 if (shutdown_type != IPR_SHUTDOWN_NONE &&
8603 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8604 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8605 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8606 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8607 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8608
8609 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8610 timeout = IPR_SHUTDOWN_TIMEOUT;
8611 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8612 timeout = IPR_INTERNAL_TIMEOUT;
8613 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8614 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8615 else
8616 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8617
8618 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8619
8620 rc = IPR_RC_JOB_RETURN;
8621 ipr_cmd->job_step = ipr_reset_ucode_download;
8622 } else
8623 ipr_cmd->job_step = ipr_reset_alert;
8624
8625 LEAVE;
8626 return rc;
8627 }
8628
8629 /**
8630 * ipr_reset_ioa_job - Adapter reset job
8631 * @ipr_cmd: ipr command struct
8632 *
8633 * Description: This function is the job router for the adapter reset job.
8634 *
8635 * Return value:
8636 * none
8637 **/
8638 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8639 {
8640 u32 rc, ioasc;
8641 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8642
8643 do {
8644 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8645
8646 if (ioa_cfg->reset_cmd != ipr_cmd) {
8647 /*
8648 * We are doing nested adapter resets and this is
8649 * not the current reset job.
8650 */
8651 list_add_tail(&ipr_cmd->queue,
8652 &ipr_cmd->hrrq->hrrq_free_q);
8653 return;
8654 }
8655
8656 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8657 rc = ipr_cmd->job_step_failed(ipr_cmd);
8658 if (rc == IPR_RC_JOB_RETURN)
8659 return;
8660 }
8661
8662 ipr_reinit_ipr_cmnd(ipr_cmd);
8663 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8664 rc = ipr_cmd->job_step(ipr_cmd);
8665 } while (rc == IPR_RC_JOB_CONTINUE);
8666 }
8667
8668 /**
8669 * _ipr_initiate_ioa_reset - Initiate an adapter reset
8670 * @ioa_cfg: ioa config struct
8671 * @job_step: first job step of reset job
8672 * @shutdown_type: shutdown type
8673 *
8674 * Description: This function will initiate the reset of the given adapter
8675 * starting at the selected job step.
8676 * If the caller needs to wait on the completion of the reset,
8677 * the caller must sleep on the reset_wait_q.
8678 *
8679 * Return value:
8680 * none
8681 **/
8682 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8683 int (*job_step) (struct ipr_cmnd *),
8684 enum ipr_shutdown_type shutdown_type)
8685 {
8686 struct ipr_cmnd *ipr_cmd;
8687 int i;
8688
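/* Stop accepting new commands on every HRRQ before starting the reset job */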
8689 ioa_cfg->in_reset_reload = 1;
8690 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8691 spin_lock(&ioa_cfg->hrrq[i]._lock);
8692 ioa_cfg->hrrq[i].allow_cmds = 0;
8693 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8694 }
8695 wmb();
8696 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8697 scsi_block_requests(ioa_cfg->host);
8698
8699 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8700 ioa_cfg->reset_cmd = ipr_cmd;
8701 ipr_cmd->job_step = job_step;
8702 ipr_cmd->u.shutdown_type = shutdown_type;
8703
8704 ipr_reset_ioa_job(ipr_cmd);
8705 }
8706
8707 /**
8708 * ipr_initiate_ioa_reset - Initiate an adapter reset
8709 * @ioa_cfg: ioa config struct
8710 * @shutdown_type: shutdown type
8711 *
8712 * Description: This function will initiate the reset of the given adapter.
8713 * If the caller needs to wait on the completion of the reset,
8714 * the caller must sleep on the reset_wait_q.
8715 *
8716 * Return value:
8717 * none
8718 **/
8719 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8720 enum ipr_shutdown_type shutdown_type)
8721 {
8722 int i;
8723
8724 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8725 return;
8726
8727 if (ioa_cfg->in_reset_reload) {
8728 if (ioa_cfg->sdt_state == GET_DUMP)
8729 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8730 else if (ioa_cfg->sdt_state == READ_DUMP)
8731 ioa_cfg->sdt_state = ABORT_DUMP;
8732 }
8733
8734 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8735 dev_err(&ioa_cfg->pdev->dev,
8736 "IOA taken offline - error recovery failed\n");
8737
8738 ioa_cfg->reset_retries = 0;
8739 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8740 spin_lock(&ioa_cfg->hrrq[i]._lock);
8741 ioa_cfg->hrrq[i].ioa_is_dead = 1;
8742 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8743 }
8744 wmb();
8745
8746 if (ioa_cfg->in_ioa_bringdown) {
8747 ioa_cfg->reset_cmd = NULL;
8748 ioa_cfg->in_reset_reload = 0;
8749 ipr_fail_all_ops(ioa_cfg);
8750 wake_up_all(&ioa_cfg->reset_wait_q);
8751
8752 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8753 spin_unlock_irq(ioa_cfg->host->host_lock);
8754 scsi_unblock_requests(ioa_cfg->host);
8755 spin_lock_irq(ioa_cfg->host->host_lock);
8756 }
8757 return;
8758 } else {
8759 ioa_cfg->in_ioa_bringdown = 1;
8760 shutdown_type = IPR_SHUTDOWN_NONE;
8761 }
8762 }
8763
8764 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8765 shutdown_type);
8766 }
8767
8768 /**
8769 * ipr_reset_freeze - Hold off all I/O activity
8770 * @ipr_cmd: ipr command struct
8771 *
8772 * Description: If the PCI slot is frozen, hold off all I/O
8773 * activity; then, as soon as the slot is available again,
8774 * initiate an adapter reset.
8775 */
8776 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8777 {
8778 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8779 int i;
8780
8781 /* Disallow new interrupts to avoid an interrupt loop while the slot is frozen */
8782 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8783 spin_lock(&ioa_cfg->hrrq[i]._lock);
8784 ioa_cfg->hrrq[i].allow_interrupts = 0;
8785 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8786 }
8787 wmb();
8788 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8789 ipr_cmd->done = ipr_reset_ioa_job;
8790 return IPR_RC_JOB_RETURN;
8791 }
8792
8793 /**
8794 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8795 * @pdev: PCI device struct
8796 *
8797 * Description: This routine is called to tell us that the MMIO
8798 * access to the IOA has been restored
8799 */
8800 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8801 {
8802 unsigned long flags = 0;
8803 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8804
8805 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8806 if (!ioa_cfg->probe_done)
8807 pci_save_state(pdev);
8808 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8809 return PCI_ERS_RESULT_NEED_RESET;
8810 }
8811
8812 /**
8813 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8814 * @pdev: PCI device struct
8815 *
8816 * Description: This routine is called to tell us that the PCI bus
8817 * is down. Can't do anything here, except put the device driver
8818 * into a holding pattern, waiting for the PCI bus to come back.
8819 */
8820 static void ipr_pci_frozen(struct pci_dev *pdev)
8821 {
8822 unsigned long flags = 0;
8823 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8824
8825 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8826 if (ioa_cfg->probe_done)
8827 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8828 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8829 }
8830
8831 /**
8832 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8833 * @pdev: PCI device struct
8834 *
8835 * Description: This routine is called by the pci error recovery
8836 * code after the PCI slot has been reset, just before we
8837 * should resume normal operations.
8838 */
8839 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8840 {
8841 unsigned long flags = 0;
8842 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8843
8844 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8845 if (ioa_cfg->probe_done) {
8846 if (ioa_cfg->needs_warm_reset)
8847 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8848 else
8849 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8850 IPR_SHUTDOWN_NONE);
8851 } else
8852 wake_up_all(&ioa_cfg->eeh_wait_q);
8853 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8854 return PCI_ERS_RESULT_RECOVERED;
8855 }
8856
8857 /**
8858 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8859 * @pdev: PCI device struct
8860 *
8861 * Description: This routine is called when the PCI bus has
8862 * permanently failed.
8863 */
8864 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8865 {
8866 unsigned long flags = 0;
8867 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8868 int i;
8869
8870 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8871 if (ioa_cfg->probe_done) {
8872 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8873 ioa_cfg->sdt_state = ABORT_DUMP;
8874 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8875 ioa_cfg->in_ioa_bringdown = 1;
8876 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8877 spin_lock(&ioa_cfg->hrrq[i]._lock);
8878 ioa_cfg->hrrq[i].allow_cmds = 0;
8879 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8880 }
8881 wmb();
8882 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8883 } else
8884 wake_up_all(&ioa_cfg->eeh_wait_q);
8885 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8886 }
8887
8888 /**
8889 * ipr_pci_error_detected - Called when a PCI error is detected.
8890 * @pdev: PCI device struct
8891 * @state: PCI channel state
8892 *
8893 * Description: Called when a PCI error is detected.
8894 *
8895 * Return value:
8896 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8897 */
8898 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8899 pci_channel_state_t state)
8900 {
8901 switch (state) {
8902 case pci_channel_io_frozen:
8903 ipr_pci_frozen(pdev);
8904 return PCI_ERS_RESULT_CAN_RECOVER;
8905 case pci_channel_io_perm_failure:
8906 ipr_pci_perm_failure(pdev);
8907 return PCI_ERS_RESULT_DISCONNECT;
8909 default:
8910 break;
8911 }
8912 return PCI_ERS_RESULT_NEED_RESET;
8913 }
8914
8915 /**
8916 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8917 * @ioa_cfg: ioa cfg struct
8918 *
8919 * Description: This is the second phase of adapter initialization.
8920 * This function takes care of initializing the adapter to the point
8921 * where it can accept new commands.
8922 *
8923 * Return value:
8924 * 0 on success / -EIO on failure
8925 **/
8926 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8927 {
8928 int rc = 0;
8929 unsigned long host_lock_flags = 0;
8930
8931 ENTER;
8932 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8933 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8934 ioa_cfg->probe_done = 1;
8935 if (ioa_cfg->needs_hard_reset) {
8936 ioa_cfg->needs_hard_reset = 0;
8937 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8938 } else
8939 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8940 IPR_SHUTDOWN_NONE);
8941 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8942 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8943 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8944
8945 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8946 rc = -EIO;
8947 } else if (ipr_invalid_adapter(ioa_cfg)) {
8948 if (!ipr_testmode)
8949 rc = -EIO;
8950
8951 dev_err(&ioa_cfg->pdev->dev,
8952 "Adapter not supported in this hardware configuration.\n");
8953 }
8954
8955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8956
8957 LEAVE;
8958 return rc;
8959 }
8960
8961 /**
8962 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8963 * @ioa_cfg: ioa config struct
8964 *
8965 * Return value:
8966 * none
8967 **/
8968 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8969 {
8970 int i;
8971
8972 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8973 if (ioa_cfg->ipr_cmnd_list[i])
8974 pci_pool_free(ioa_cfg->ipr_cmd_pool,
8975 ioa_cfg->ipr_cmnd_list[i],
8976 ioa_cfg->ipr_cmnd_list_dma[i]);
8977
8978 ioa_cfg->ipr_cmnd_list[i] = NULL;
8979 }
8980
8981 if (ioa_cfg->ipr_cmd_pool)
8982 pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
8983
8984 kfree(ioa_cfg->ipr_cmnd_list);
8985 kfree(ioa_cfg->ipr_cmnd_list_dma);
8986 ioa_cfg->ipr_cmnd_list = NULL;
8987 ioa_cfg->ipr_cmnd_list_dma = NULL;
8988 ioa_cfg->ipr_cmd_pool = NULL;
8989 }
8990
8991 /**
8992 * ipr_free_mem - Frees memory allocated for an adapter
8993 * @ioa_cfg: ioa cfg struct
8994 *
8995 * Return value:
8996 * nothing
8997 **/
8998 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8999 {
9000 int i;
9001
9002 kfree(ioa_cfg->res_entries);
9003 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
9004 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9005 ipr_free_cmd_blks(ioa_cfg);
9006
9007 for (i = 0; i < ioa_cfg->hrrq_num; i++)
9008 pci_free_consistent(ioa_cfg->pdev,
9009 sizeof(u32) * ioa_cfg->hrrq[i].size,
9010 ioa_cfg->hrrq[i].host_rrq,
9011 ioa_cfg->hrrq[i].host_rrq_dma);
9012
9013 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
9014 ioa_cfg->u.cfg_table,
9015 ioa_cfg->cfg_table_dma);
9016
9017 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9018 pci_free_consistent(ioa_cfg->pdev,
9019 sizeof(struct ipr_hostrcb),
9020 ioa_cfg->hostrcb[i],
9021 ioa_cfg->hostrcb_dma[i]);
9022 }
9023
9024 ipr_free_dump(ioa_cfg);
9025 kfree(ioa_cfg->trace);
9026 }
9027
9028 /**
9029 * ipr_free_all_resources - Free all allocated resources for an adapter.
9030 * @ioa_cfg: ioa config struct
9031 *
9032 * This function frees all allocated resources for the
9033 * specified adapter.
9034 *
9035 * Return value:
9036 * none
9037 **/
9038 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9039 {
9040 struct pci_dev *pdev = ioa_cfg->pdev;
9041
9042 ENTER;
9043 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9044 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9045 int i;
9046 for (i = 0; i < ioa_cfg->nvectors; i++)
9047 free_irq(ioa_cfg->vectors_info[i].vec,
9048 &ioa_cfg->hrrq[i]);
9049 } else
9050 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9051
9052 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9053 pci_disable_msi(pdev);
9054 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9055 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9056 pci_disable_msix(pdev);
9057 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9058 }
9059
9060 iounmap(ioa_cfg->hdw_dma_regs);
9061 pci_release_regions(pdev);
9062 ipr_free_mem(ioa_cfg);
9063 scsi_host_put(ioa_cfg->host);
9064 pci_disable_device(pdev);
9065 LEAVE;
9066 }
9067
9068 /**
9069 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9070 * @ioa_cfg: ioa config struct
9071 *
9072 * Return value:
9073 * 0 on success / -ENOMEM on allocation failure
9074 **/
9075 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9076 {
9077 struct ipr_cmnd *ipr_cmd;
9078 struct ipr_ioarcb *ioarcb;
9079 dma_addr_t dma_addr;
9080 int i, entries_each_hrrq, hrrq_id = 0;
9081
9082 ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
9083 sizeof(struct ipr_cmnd), 512, 0);
9084
9085 if (!ioa_cfg->ipr_cmd_pool)
9086 return -ENOMEM;
9087
9088 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9089 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9090
9091 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9092 ipr_free_cmd_blks(ioa_cfg);
9093 return -ENOMEM;
9094 }
9095
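/*
 * When multiple HRRQs are in use, the first queue is sized for the
 * driver's internal commands and the remaining command blocks are
 * divided evenly among the other queues.
 */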
9096 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9097 if (ioa_cfg->hrrq_num > 1) {
9098 if (i == 0) {
9099 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9100 ioa_cfg->hrrq[i].min_cmd_id = 0;
9101 ioa_cfg->hrrq[i].max_cmd_id =
9102 (entries_each_hrrq - 1);
9103 } else {
9104 entries_each_hrrq =
9105 IPR_NUM_BASE_CMD_BLKS/
9106 (ioa_cfg->hrrq_num - 1);
9107 ioa_cfg->hrrq[i].min_cmd_id =
9108 IPR_NUM_INTERNAL_CMD_BLKS +
9109 (i - 1) * entries_each_hrrq;
9110 ioa_cfg->hrrq[i].max_cmd_id =
9111 (IPR_NUM_INTERNAL_CMD_BLKS +
9112 i * entries_each_hrrq - 1);
9113 }
9114 } else {
9115 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9116 ioa_cfg->hrrq[i].min_cmd_id = 0;
9117 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9118 }
9119 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9120 }
9121
9122 BUG_ON(ioa_cfg->hrrq_num == 0);
9123
9124 i = IPR_NUM_CMD_BLKS -
9125 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9126 if (i > 0) {
9127 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9128 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9129 }
9130
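/*
 * Allocate each command block from the DMA pool, fill in its IOARCB
 * DMA addresses, and add it to the free list of the HRRQ that owns
 * its command id range.
 */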
9131 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9132 ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9133
9134 if (!ipr_cmd) {
9135 ipr_free_cmd_blks(ioa_cfg);
9136 return -ENOMEM;
9137 }
9138
9139 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9140 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9141 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9142
9143 ioarcb = &ipr_cmd->ioarcb;
9144 ipr_cmd->dma_addr = dma_addr;
9145 if (ioa_cfg->sis64)
9146 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9147 else
9148 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9149
9150 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9151 if (ioa_cfg->sis64) {
9152 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9153 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9154 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9155 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9156 } else {
9157 ioarcb->write_ioadl_addr =
9158 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9159 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9160 ioarcb->ioasa_host_pci_addr =
9161 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9162 }
9163 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9164 ipr_cmd->cmd_index = i;
9165 ipr_cmd->ioa_cfg = ioa_cfg;
9166 ipr_cmd->sense_buffer_dma = dma_addr +
9167 offsetof(struct ipr_cmnd, sense_buffer);
9168
9169 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9170 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9171 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9172 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9173 hrrq_id++;
9174 }
9175
9176 return 0;
9177 }
9178
9179 /**
9180 * ipr_alloc_mem - Allocate memory for an adapter
9181 * @ioa_cfg: ioa config struct
9182 *
9183 * Return value:
9184 * 0 on success / non-zero for error
9185 **/
9186 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9187 {
9188 struct pci_dev *pdev = ioa_cfg->pdev;
9189 int i, rc = -ENOMEM;
9190
9191 ENTER;
9192 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9193 ioa_cfg->max_devs_supported, GFP_KERNEL);
9194
9195 if (!ioa_cfg->res_entries)
9196 goto out;
9197
9198 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9199 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9200 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9201 }
9202
9203 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
9204 sizeof(struct ipr_misc_cbs),
9205 &ioa_cfg->vpd_cbs_dma);
9206
9207 if (!ioa_cfg->vpd_cbs)
9208 goto out_free_res_entries;
9209
9210 if (ipr_alloc_cmd_blks(ioa_cfg))
9211 goto out_free_vpd_cbs;
9212
9213 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9214 ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
9215 sizeof(u32) * ioa_cfg->hrrq[i].size,
9216 &ioa_cfg->hrrq[i].host_rrq_dma);
9217
9218 if (!ioa_cfg->hrrq[i].host_rrq) {
9219 while (--i >= 0)
9220 pci_free_consistent(pdev,
9221 sizeof(u32) * ioa_cfg->hrrq[i].size,
9222 ioa_cfg->hrrq[i].host_rrq,
9223 ioa_cfg->hrrq[i].host_rrq_dma);
9224 goto out_ipr_free_cmd_blocks;
9225 }
9226 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9227 }
9228
9229 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
9230 ioa_cfg->cfg_table_size,
9231 &ioa_cfg->cfg_table_dma);
9232
9233 if (!ioa_cfg->u.cfg_table)
9234 goto out_free_host_rrq;
9235
9236 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9237 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
9238 sizeof(struct ipr_hostrcb),
9239 &ioa_cfg->hostrcb_dma[i]);
9240
9241 if (!ioa_cfg->hostrcb[i])
9242 goto out_free_hostrcb_dma;
9243
9244 ioa_cfg->hostrcb[i]->hostrcb_dma =
9245 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9246 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9247 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9248 }
9249
9250 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9251 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9252
9253 if (!ioa_cfg->trace)
9254 goto out_free_hostrcb_dma;
9255
9256 rc = 0;
9257 out:
9258 LEAVE;
9259 return rc;
9260
9261 out_free_hostrcb_dma:
9262 while (i-- > 0) {
9263 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
9264 ioa_cfg->hostrcb[i],
9265 ioa_cfg->hostrcb_dma[i]);
9266 }
9267 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
9268 ioa_cfg->u.cfg_table,
9269 ioa_cfg->cfg_table_dma);
9270 out_free_host_rrq:
9271 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9272 pci_free_consistent(pdev,
9273 sizeof(u32) * ioa_cfg->hrrq[i].size,
9274 ioa_cfg->hrrq[i].host_rrq,
9275 ioa_cfg->hrrq[i].host_rrq_dma);
9276 }
9277 out_ipr_free_cmd_blocks:
9278 ipr_free_cmd_blks(ioa_cfg);
9279 out_free_vpd_cbs:
9280 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
9281 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9282 out_free_res_entries:
9283 kfree(ioa_cfg->res_entries);
9284 goto out;
9285 }
9286
9287 /**
9288 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9289 * @ioa_cfg: ioa config struct
9290 *
9291 * Return value:
9292 * none
9293 **/
9294 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9295 {
9296 int i;
9297
9298 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9299 ioa_cfg->bus_attr[i].bus = i;
9300 ioa_cfg->bus_attr[i].qas_enabled = 0;
9301 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9302 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9303 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9304 else
9305 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9306 }
9307 }
9308
9309 /**
9310 * ipr_init_regs - Initialize IOA registers
9311 * @ioa_cfg: ioa config struct
9312 *
9313 * Return value:
9314 * none
9315 **/
9316 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9317 {
9318 const struct ipr_interrupt_offsets *p;
9319 struct ipr_interrupts *t;
9320 void __iomem *base;
9321
9322 p = &ioa_cfg->chip_cfg->regs;
9323 t = &ioa_cfg->regs;
9324 base = ioa_cfg->hdw_dma_regs;
9325
9326 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9327 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9328 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9329 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9330 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9331 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9332 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9333 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9334 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9335 t->ioarrin_reg = base + p->ioarrin_reg;
9336 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9337 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9338 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9339 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9340 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9341 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9342
9343 if (ioa_cfg->sis64) {
9344 t->init_feedback_reg = base + p->init_feedback_reg;
9345 t->dump_addr_reg = base + p->dump_addr_reg;
9346 t->dump_data_reg = base + p->dump_data_reg;
9347 t->endian_swap_reg = base + p->endian_swap_reg;
9348 }
9349 }
9350
9351 /**
9352 * ipr_init_ioa_cfg - Initialize IOA config struct
9353 * @ioa_cfg: ioa config struct
9354 * @host: scsi host struct
9355 * @pdev: PCI dev struct
9356 *
9357 * Return value:
9358 * none
9359 **/
9360 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9361 struct Scsi_Host *host, struct pci_dev *pdev)
9362 {
9363 int i;
9364
9365 ioa_cfg->host = host;
9366 ioa_cfg->pdev = pdev;
9367 ioa_cfg->log_level = ipr_log_level;
9368 ioa_cfg->doorbell = IPR_DOORBELL;
9369 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9370 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9371 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9372 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9373 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9374 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9375
9376 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9377 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9378 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9379 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9380 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9381 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9382 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9383 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9384 ioa_cfg->sdt_state = INACTIVE;
9385
9386 ipr_initialize_bus_attr(ioa_cfg);
9387 ioa_cfg->max_devs_supported = ipr_max_devs;
9388
9389 if (ioa_cfg->sis64) {
9390 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9391 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9392 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9393 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9394 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9395 + ((sizeof(struct ipr_config_table_entry64)
9396 * ioa_cfg->max_devs_supported)));
9397 } else {
9398 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9399 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9400 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9401 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9402 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9403 + ((sizeof(struct ipr_config_table_entry)
9404 * ioa_cfg->max_devs_supported)));
9405 }
9406
9407 host->max_channel = IPR_MAX_BUS_TO_SCAN;
9408 host->unique_id = host->host_no;
9409 host->max_cmd_len = IPR_MAX_CDB_LEN;
9410 host->can_queue = ioa_cfg->max_cmds;
9411 pci_set_drvdata(pdev, ioa_cfg);
9412
9413 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9414 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9415 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9416 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9417 if (i == 0)
9418 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9419 else
9420 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9421 }
9422 }
9423
9424 /**
9425 * ipr_get_chip_info - Find adapter chip information
9426 * @dev_id: PCI device id struct
9427 *
9428 * Return value:
9429 * ptr to chip information on success / NULL on failure
9430 **/
9431 static const struct ipr_chip_t *
9432 ipr_get_chip_info(const struct pci_device_id *dev_id)
9433 {
9434 int i;
9435
9436 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9437 if (ipr_chip[i].vendor == dev_id->vendor &&
9438 ipr_chip[i].device == dev_id->device)
9439 return &ipr_chip[i];
9440 return NULL;
9441 }
9442
9443 /**
9444 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9445 * during probe time
9446 * @ioa_cfg: ioa config struct
9447 *
9448 * Return value:
9449 * None
9450 **/
9451 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9452 {
9453 struct pci_dev *pdev = ioa_cfg->pdev;
9454
9455 if (pci_channel_offline(pdev)) {
9456 wait_event_timeout(ioa_cfg->eeh_wait_q,
9457 !pci_channel_offline(pdev),
9458 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9459 pci_restore_state(pdev);
9460 }
9461 }
9462
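/**
 * ipr_enable_msix - Enable MSI-X interrupt vectors
 * @ioa_cfg: ioa config struct
 *
 * Description: Requests between one and ipr_number_of_msix MSI-X vectors
 * and records each assigned vector number in the ioa config struct.
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/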
9463 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9464 {
9465 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9466 int i, vectors;
9467
9468 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9469 entries[i].entry = i;
9470
9471 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9472 entries, 1, ipr_number_of_msix);
9473 if (vectors < 0) {
9474 ipr_wait_for_pci_err_recovery(ioa_cfg);
9475 return vectors;
9476 }
9477
9478 for (i = 0; i < vectors; i++)
9479 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9480 ioa_cfg->nvectors = vectors;
9481
9482 return 0;
9483 }
9484
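/**
 * ipr_enable_msi - Enable MSI interrupt vectors
 * @ioa_cfg: ioa config struct
 *
 * Description: Requests between one and ipr_number_of_msix MSI vectors
 * and records the assigned interrupt numbers in the ioa config struct.
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/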
9485 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9486 {
9487 int i, vectors;
9488
9489 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9490 if (vectors < 0) {
9491 ipr_wait_for_pci_err_recovery(ioa_cfg);
9492 return vectors;
9493 }
9494
9495 for (i = 0; i < vectors; i++)
9496 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9497 ioa_cfg->nvectors = vectors;
9498
9499 return 0;
9500 }
9501
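/**
 * name_msi_vectors - Build per-vector interrupt description strings
 * @ioa_cfg: ioa config struct
 *
 * Description: Formats a "host<n>-<vector>" description for each allocated
 * vector; the strings are used later when the interrupts are requested.
 *
 * Return value:
 * 	none
 **/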
9502 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9503 {
9504 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9505
9506 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9507 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9508 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9509 ioa_cfg->vectors_info[vec_idx].
9510 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9511 }
9512 }
9513
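/**
 * ipr_request_other_msi_irqs - Request IRQs for the secondary HRRQ vectors
 * @ioa_cfg: ioa config struct
 *
 * Description: Requests an interrupt for each vector beyond the first,
 * freeing any vectors already requested if a request fails.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/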
9514 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9515 {
9516 int i, rc;
9517
9518 for (i = 1; i < ioa_cfg->nvectors; i++) {
9519 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9520 ipr_isr_mhrrq,
9521 0,
9522 ioa_cfg->vectors_info[i].desc,
9523 &ioa_cfg->hrrq[i]);
9524 if (rc) {
9525 while (--i >= 0)
9526 free_irq(ioa_cfg->vectors_info[i].vec,
9527 &ioa_cfg->hrrq[i]);
9528 return rc;
9529 }
9530 }
9531 return 0;
9532 }
9533
9534 /**
9535 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9536 * @irq: interrupt number
 * @devp: pointer to ioa config struct
9537 *
9538 * Description: Simply set the msi_received flag to 1 indicating that
9539 * Message Signaled Interrupts are supported.
9540 *
9541 * Return value:
9542 * IRQ_HANDLED
9543 **/
9544 static irqreturn_t ipr_test_intr(int irq, void *devp)
9545 {
9546 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9547 unsigned long lock_flags = 0;
9548 irqreturn_t rc = IRQ_HANDLED;
9549
9550 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9551 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9552
9553 ioa_cfg->msi_received = 1;
9554 wake_up(&ioa_cfg->msi_wait_q);
9555
9556 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9557 return rc;
9558 }
9559
9560 /**
9561 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9562 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
9563 *
9564 * Description: The return value from pci_enable_msi_range() can not always be
9565 * trusted. This routine sets up and initiates a test interrupt to determine
9566 * if the interrupt is received via the ipr_test_intr() service routine.
9567 * If the test fails, the driver will fall back to LSI.
9568 *
9569 * Return value:
9570 * 0 on success / non-zero on failure
9571 **/
9572 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9573 {
9574 int rc;
9575 volatile u32 int_reg;
9576 unsigned long lock_flags = 0;
9577
9578 ENTER;
9579
9580 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9581 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9582 ioa_cfg->msi_received = 0;
9583 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9584 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9585 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9586 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9587
9588 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9589 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9590 else
9591 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9592 if (rc) {
9593 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9594 return rc;
9595 } else if (ipr_debug)
9596 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9597
9598 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9599 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9600 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9601 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9602 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9603
9604 if (!ioa_cfg->msi_received) {
9605 /* MSI test failed */
9606 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9607 rc = -EOPNOTSUPP;
9608 } else if (ipr_debug)
9609 dev_info(&pdev->dev, "MSI test succeeded.\n");
9610
9611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9612
9613 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9614 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9615 else
9616 free_irq(pdev->irq, ioa_cfg);
9617
9618 LEAVE;
9619
9620 return rc;
9621 }
9622
9623 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9624 * @pdev: PCI device struct
9625 * @dev_id: PCI device id struct
9626 *
9627 * Return value:
9628 * 0 on success / non-zero on failure
9629 **/
9630 static int ipr_probe_ioa(struct pci_dev *pdev,
9631 const struct pci_device_id *dev_id)
9632 {
9633 struct ipr_ioa_cfg *ioa_cfg;
9634 struct Scsi_Host *host;
9635 unsigned long ipr_regs_pci;
9636 void __iomem *ipr_regs;
9637 int rc = PCIBIOS_SUCCESSFUL;
9638 volatile u32 mask, uproc, interrupts;
9639 unsigned long lock_flags, driver_lock_flags;
9640
9641 ENTER;
9642
9643 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9644 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9645
9646 if (!host) {
9647 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9648 rc = -ENOMEM;
9649 goto out;
9650 }
9651
9652 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9653 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9654 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9655
9656 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9657
9658 if (!ioa_cfg->ipr_chip) {
9659 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9660 dev_id->vendor, dev_id->device);
9661 rc = -EINVAL;
 goto out_scsi_host_put;
9662 }
9663
9664 /* set SIS 32 or SIS 64 */
9665 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9666 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9667 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9668 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9669
9670 if (ipr_transop_timeout)
9671 ioa_cfg->transop_timeout = ipr_transop_timeout;
9672 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9673 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9674 else
9675 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9676
9677 ioa_cfg->revid = pdev->revision;
9678
9679 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9680
9681 ipr_regs_pci = pci_resource_start(pdev, 0);
9682
9683 rc = pci_request_regions(pdev, IPR_NAME);
9684 if (rc < 0) {
9685 dev_err(&pdev->dev,
9686 "Couldn't register memory range of registers\n");
9687 goto out_scsi_host_put;
9688 }
9689
9690 rc = pci_enable_device(pdev);
9691
9692 if (rc || pci_channel_offline(pdev)) {
9693 if (pci_channel_offline(pdev)) {
9694 ipr_wait_for_pci_err_recovery(ioa_cfg);
9695 rc = pci_enable_device(pdev);
9696 }
9697
9698 if (rc) {
9699 dev_err(&pdev->dev, "Cannot enable adapter\n");
9700 ipr_wait_for_pci_err_recovery(ioa_cfg);
9701 goto out_release_regions;
9702 }
9703 }
9704
9705 ipr_regs = pci_ioremap_bar(pdev, 0);
9706
9707 if (!ipr_regs) {
9708 dev_err(&pdev->dev,
9709 "Couldn't map memory range of registers\n");
9710 rc = -ENOMEM;
9711 goto out_disable;
9712 }
9713
9714 ioa_cfg->hdw_dma_regs = ipr_regs;
9715 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9716 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9717
9718 ipr_init_regs(ioa_cfg);
9719
9720 if (ioa_cfg->sis64) {
9721 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
9722 if (rc < 0) {
9723 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
9724 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9725 }
9726 } else
9727 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9728
9729 if (rc < 0) {
9730 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
9731 goto cleanup_nomem;
9732 }
9733
9734 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9735 ioa_cfg->chip_cfg->cache_line_size);
9736
9737 if (rc != PCIBIOS_SUCCESSFUL) {
9738 dev_err(&pdev->dev, "Write of cache line size failed\n");
9739 ipr_wait_for_pci_err_recovery(ioa_cfg);
9740 rc = -EIO;
9741 goto cleanup_nomem;
9742 }
9743
9744 /* Issue MMIO read to ensure card is not in EEH */
9745 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9746 ipr_wait_for_pci_err_recovery(ioa_cfg);
9747
9748 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9749 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9750 IPR_MAX_MSIX_VECTORS);
9751 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9752 }
9753
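/*
 * When the chip supports MSI, try MSI-X first, then plain MSI;
 * otherwise fall back to legacy interrupts (LSI).
 */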
9754 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9755 ipr_enable_msix(ioa_cfg) == 0)
9756 ioa_cfg->intr_flag = IPR_USE_MSIX;
9757 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
9758 ipr_enable_msi(ioa_cfg) == 0)
9759 ioa_cfg->intr_flag = IPR_USE_MSI;
9760 else {
9761 ioa_cfg->intr_flag = IPR_USE_LSI;
9762 ioa_cfg->clear_isr = 1;
9763 ioa_cfg->nvectors = 1;
9764 dev_info(&pdev->dev, "Cannot enable MSI.\n");
9765 }
9766
9767 pci_set_master(pdev);
9768
9769 if (pci_channel_offline(pdev)) {
9770 ipr_wait_for_pci_err_recovery(ioa_cfg);
9771 pci_set_master(pdev);
9772 if (pci_channel_offline(pdev)) {
9773 rc = -EIO;
9774 goto out_msi_disable;
9775 }
9776 }
9777
9778 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9779 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9780 rc = ipr_test_msi(ioa_cfg, pdev);
9781 if (rc == -EOPNOTSUPP) {
9782 ipr_wait_for_pci_err_recovery(ioa_cfg);
9783 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9784 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9785 pci_disable_msi(pdev);
9786 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9787 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9788 pci_disable_msix(pdev);
9789 }
9790
9791 ioa_cfg->intr_flag = IPR_USE_LSI;
9792 ioa_cfg->nvectors = 1;
9793 }
9794 else if (rc)
9795 goto out_msi_disable;
9796 else {
9797 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9798 dev_info(&pdev->dev,
9799 "Request for %d MSIs succeeded with starting IRQ: %d\n",
9800 ioa_cfg->nvectors, pdev->irq);
9801 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9802 dev_info(&pdev->dev,
9803 "Request for %d MSIXs succeeded.\n",
9804 ioa_cfg->nvectors);
9805 }
9806 }
9807
9808 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9809 (unsigned int)num_online_cpus(),
9810 (unsigned int)IPR_MAX_HRRQ_NUM);
9811
9812 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9813 goto out_msi_disable;
9814
9815 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9816 goto out_msi_disable;
9817
9818 rc = ipr_alloc_mem(ioa_cfg);
9819 if (rc < 0) {
9820 dev_err(&pdev->dev,
9821 "Couldn't allocate enough memory for device driver!\n");
9822 goto out_msi_disable;
9823 }
9824
9825 /* Save away PCI config space for use following IOA reset */
9826 rc = pci_save_state(pdev);
9827
9828 if (rc != PCIBIOS_SUCCESSFUL) {
9829 dev_err(&pdev->dev, "Failed to save PCI config space\n");
9830 rc = -EIO;
9831 goto cleanup_nolog;
9832 }
9833
9834 /*
9835 * If HRRQ updated interrupt is not masked, or reset alert is set,
9836 * the card is in an unknown state and needs a hard reset
9837 */
9838 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9839 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9840 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9841 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9842 ioa_cfg->needs_hard_reset = 1;
9843 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9844 ioa_cfg->needs_hard_reset = 1;
9845 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9846 ioa_cfg->ioa_unit_checked = 1;
9847
9848 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9849 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9850 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9851
9852 if (ioa_cfg->intr_flag == IPR_USE_MSI
9853 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
9854 name_msi_vectors(ioa_cfg);
9855 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
9856 0,
9857 ioa_cfg->vectors_info[0].desc,
9858 &ioa_cfg->hrrq[0]);
9859 if (!rc)
9860 rc = ipr_request_other_msi_irqs(ioa_cfg);
9861 } else {
9862 rc = request_irq(pdev->irq, ipr_isr,
9863 IRQF_SHARED,
9864 IPR_NAME, &ioa_cfg->hrrq[0]);
9865 }
9866 if (rc) {
9867 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9868 pdev->irq, rc);
9869 goto cleanup_nolog;
9870 }
9871
9872 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9873 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9874 ioa_cfg->needs_warm_reset = 1;
9875 ioa_cfg->reset = ipr_reset_slot_reset;
9876 } else
9877 ioa_cfg->reset = ipr_reset_start_bist;
9878
9879 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9880 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9881 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9882
9883 LEAVE;
9884 out:
9885 return rc;
9886
9887 cleanup_nolog:
9888 ipr_free_mem(ioa_cfg);
9889 out_msi_disable:
9890 ipr_wait_for_pci_err_recovery(ioa_cfg);
9891 if (ioa_cfg->intr_flag == IPR_USE_MSI)
9892 pci_disable_msi(pdev);
9893 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9894 pci_disable_msix(pdev);
9895 cleanup_nomem:
9896 iounmap(ipr_regs);
9897 out_disable:
9898 pci_disable_device(pdev);
9899 out_release_regions:
9900 pci_release_regions(pdev);
9901 out_scsi_host_put:
9902 scsi_host_put(host);
9903 goto out;
9904 }
9905
9906 /**
9907 * ipr_scan_vsets - Scans for VSET devices
9908 * @ioa_cfg: ioa config struct
9909 *
9910 * Description: Since the VSET resources do not follow SAM (we can have
9911 * sparse LUNs with no LUN 0), we have to scan for these ourselves.
9912 *
9913 * Return value:
9914 * none
9915 **/
9916 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9917 {
9918 int target, lun;
9919
9920 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9921 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9922 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9923 }
9924
9925 /**
9926 * ipr_initiate_ioa_bringdown - Bring down an adapter
9927 * @ioa_cfg: ioa config struct
9928 * @shutdown_type: shutdown type
9929 *
9930 * Description: This function will initiate bringing down the adapter.
9931 * This consists of issuing an IOA shutdown to the adapter
9932 * to flush the cache, and running BIST.
9933 * If the caller needs to wait on the completion of the reset,
9934 * the caller must sleep on the reset_wait_q.
9935 *
9936 * Return value:
9937 * none
9938 **/
9939 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9940 enum ipr_shutdown_type shutdown_type)
9941 {
9942 ENTER;
9943 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9944 ioa_cfg->sdt_state = ABORT_DUMP;
9945 ioa_cfg->reset_retries = 0;
9946 ioa_cfg->in_ioa_bringdown = 1;
9947 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9948 LEAVE;
9949 }
9950
9951 /**
9952 * __ipr_remove - Remove a single adapter
9953 * @pdev: pci device struct
9954 *
9955 * Adapter hot plug remove entry point.
9956 *
9957 * Return value:
9958 * none
9959 **/
9960 static void __ipr_remove(struct pci_dev *pdev)
9961 {
9962 unsigned long host_lock_flags = 0;
9963 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9964 int i;
9965 unsigned long driver_lock_flags;
9966 ENTER;
9967
9968 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9969 while (ioa_cfg->in_reset_reload) {
9970 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9971 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9972 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9973 }
9974
9975 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9976 spin_lock(&ioa_cfg->hrrq[i]._lock);
9977 ioa_cfg->hrrq[i].removing_ioa = 1;
9978 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9979 }
9980 wmb();
9981 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9982
9983 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9984 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9985 flush_work(&ioa_cfg->work_q);
9986 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9987 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9988
9989 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9990 list_del(&ioa_cfg->queue);
9991 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9992
9993 if (ioa_cfg->sdt_state == ABORT_DUMP)
9994 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9995 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9996
9997 ipr_free_all_resources(ioa_cfg);
9998
9999 LEAVE;
10000 }
10001
10002 /**
10003 * ipr_remove - IOA hot plug remove entry point
10004 * @pdev: pci device struct
10005 *
10006 * Adapter hot plug remove entry point.
10007 *
10008 * Return value:
10009 * none
10010 **/
10011 static void ipr_remove(struct pci_dev *pdev)
10012 {
10013 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10014
10015 ENTER;
10016
10017 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10018 &ipr_trace_attr);
10019 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10020 &ipr_dump_attr);
10021 scsi_remove_host(ioa_cfg->host);
10022
10023 __ipr_remove(pdev);
10024
10025 LEAVE;
10026 }
10027
10028 /**
10029 * ipr_probe - Adapter hot plug add entry point
10030 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
10031 * Return value:
10032 * 0 on success / non-zero on failure
10033 **/
10034 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10035 {
10036 struct ipr_ioa_cfg *ioa_cfg;
10037 int rc, i;
10038
10039 rc = ipr_probe_ioa(pdev, dev_id);
10040
10041 if (rc)
10042 return rc;
10043
10044 ioa_cfg = pci_get_drvdata(pdev);
10045 rc = ipr_probe_ioa_part2(ioa_cfg);
10046
10047 if (rc) {
10048 __ipr_remove(pdev);
10049 return rc;
10050 }
10051
10052 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10053
10054 if (rc) {
10055 __ipr_remove(pdev);
10056 return rc;
10057 }
10058
10059 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10060 &ipr_trace_attr);
10061
10062 if (rc) {
10063 scsi_remove_host(ioa_cfg->host);
10064 __ipr_remove(pdev);
10065 return rc;
10066 }
10067
10068 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10069 &ipr_dump_attr);
10070
10071 if (rc) {
10072 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10073 &ipr_trace_attr);
10074 scsi_remove_host(ioa_cfg->host);
10075 __ipr_remove(pdev);
10076 return rc;
10077 }
10078
10079 scsi_scan_host(ioa_cfg->host);
10080 ipr_scan_vsets(ioa_cfg);
10081 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
10082 ioa_cfg->allow_ml_add_del = 1;
10083 ioa_cfg->host->max_channel = IPR_VSET_BUS;
10084 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10085
10086 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10087 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10088 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
10089 ioa_cfg->iopoll_weight, ipr_iopoll);
10090 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
10091 }
10092 }
10093
10094 schedule_work(&ioa_cfg->work_q);
10095 return 0;
10096 }
10097
10098 /**
10099 * ipr_shutdown - Shutdown handler.
10100 * @pdev: pci device struct
10101 *
10102 * This function is invoked upon system shutdown/reboot. It will issue
10103 * an adapter shutdown to the adapter to flush the write cache.
10104 *
10105 * Return value:
10106 * none
10107 **/
10108 static void ipr_shutdown(struct pci_dev *pdev)
10109 {
10110 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10111 unsigned long lock_flags = 0;
10112 int i;
10113
10114 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10115 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10116 ioa_cfg->iopoll_weight = 0;
10117 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10118 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10119 }
10120
10121 while (ioa_cfg->in_reset_reload) {
10122 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10123 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10124 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10125 }
10126
10127 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10128 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10129 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10130 }
10131
10132 static struct pci_device_id ipr_pci_table[] = {
10133 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10134 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10135 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10136 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10137 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10138 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10139 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10140 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10141 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10142 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10143 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10144 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10145 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10146 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10147 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10148 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10149 IPR_USE_LONG_TRANSOP_TIMEOUT },
10150 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10151 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10152 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10153 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10154 IPR_USE_LONG_TRANSOP_TIMEOUT },
10155 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10156 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10157 IPR_USE_LONG_TRANSOP_TIMEOUT },
10158 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10159 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10160 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10161 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10162 IPR_USE_LONG_TRANSOP_TIMEOUT},
10163 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10164 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10165 IPR_USE_LONG_TRANSOP_TIMEOUT },
10166 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10167 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10168 IPR_USE_LONG_TRANSOP_TIMEOUT },
10169 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10170 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10171 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10172 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10173 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10174 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10175 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10176 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10177 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10178 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10179 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10180 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10181 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10182 IPR_USE_LONG_TRANSOP_TIMEOUT },
10183 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10184 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10185 IPR_USE_LONG_TRANSOP_TIMEOUT },
10186 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10187 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10188 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10189 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10190 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10191 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10193 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10194 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10195 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10197 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10198 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10199 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10200 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10201 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10202 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10203 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10204 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10205 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10206 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10207 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10208 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10209 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10210 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10211 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10212 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10213 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10214 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10215 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10216 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10217 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10218 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10219 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10220 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10221 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10222 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10223 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10224 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10225 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10226 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10227 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10228 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10229 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10230 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10231 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10232 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10233 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10234 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10235 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10236 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10237 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10238 { }
10239 };
10240 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10241
10242 static const struct pci_error_handlers ipr_err_handler = {
10243 .error_detected = ipr_pci_error_detected,
10244 .mmio_enabled = ipr_pci_mmio_enabled,
10245 .slot_reset = ipr_pci_slot_reset,
10246 };
10247
10248 static struct pci_driver ipr_driver = {
10249 .name = IPR_NAME,
10250 .id_table = ipr_pci_table,
10251 .probe = ipr_probe,
10252 .remove = ipr_remove,
10253 .shutdown = ipr_shutdown,
10254 .err_handler = &ipr_err_handler,
10255 };
10256
10257 /**
10258 * ipr_halt_done - Shutdown prepare completion
10259 * @ipr_cmd: ipr command struct
 *
10260 * Return value:
10261 * none
10262 **/
10263 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10264 {
10265 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10266 }
10267
10268 /**
10269 * ipr_halt - Issue shutdown prepare to all adapters
10270 * @nb: Notifier block
 * @event: Notifier event
 * @buf: Notifier data (unused)
 *
10271 * Return value:
10272 * NOTIFY_OK on success / NOTIFY_DONE on failure
10273 **/
10274 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10275 {
10276 struct ipr_cmnd *ipr_cmd;
10277 struct ipr_ioa_cfg *ioa_cfg;
10278 unsigned long flags = 0, driver_lock_flags;
10279
10280 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10281 return NOTIFY_DONE;
10282
10283 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10284
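/* Issue a shutdown prepare to each adapter that is still accepting commands */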
10285 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10286 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10287 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
10288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10289 continue;
10290 }
10291
10292 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10293 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10294 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10295 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10296 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10297
10298 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10300 }
10301 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10302
10303 return NOTIFY_OK;
10304 }
10305
10306 static struct notifier_block ipr_notifier = {
10307 ipr_halt, NULL, 0
10308 };
10309
10310 /**
10311 * ipr_init - Module entry point
10312 *
10313 * Return value:
10314 * 0 on success / negative value on failure
10315 **/
10316 static int __init ipr_init(void)
10317 {
10318 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10319 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10320
10321 register_reboot_notifier(&ipr_notifier);
10322 return pci_register_driver(&ipr_driver);
10323 }
10324
10325 /**
10326 * ipr_exit - Module unload
10327 *
10328 * Module unload entry point.
10329 *
10330 * Return value:
10331 * none
10332 **/
10333 static void __exit ipr_exit(void)
10334 {
10335 unregister_reboot_notifier(&ipr_notifier);
10336 pci_unregister_driver(&ipr_driver);
10337 }
10338
10339 module_init(ipr_init);
10340 module_exit(ipr_exit);
10341