1 /*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
48 *
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
54 *
55 */
56
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88
89 /*
90 * Global Data
91 */
92 static LIST_HEAD(ipr_ioa_head);
93 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
94 static unsigned int ipr_max_speed = 1;
95 static int ipr_testmode = 0;
96 static unsigned int ipr_fastfail = 0;
97 static unsigned int ipr_transop_timeout = 0;
98 static unsigned int ipr_debug = 0;
99 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
100 static unsigned int ipr_dual_ioa_raid = 1;
101 static unsigned int ipr_number_of_msix = 2;
102 static unsigned int ipr_fast_reboot;
103 static DEFINE_SPINLOCK(ipr_driver_lock);
104
105 /* This table describes the differences between DMA controller chips */
106 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
107 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
108 .mailbox = 0x0042C,
109 .max_cmds = 100,
110 .cache_line_size = 0x20,
111 .clear_isr = 1,
112 .iopoll_weight = 0,
113 {
114 .set_interrupt_mask_reg = 0x0022C,
115 .clr_interrupt_mask_reg = 0x00230,
116 .clr_interrupt_mask_reg32 = 0x00230,
117 .sense_interrupt_mask_reg = 0x0022C,
118 .sense_interrupt_mask_reg32 = 0x0022C,
119 .clr_interrupt_reg = 0x00228,
120 .clr_interrupt_reg32 = 0x00228,
121 .sense_interrupt_reg = 0x00224,
122 .sense_interrupt_reg32 = 0x00224,
123 .ioarrin_reg = 0x00404,
124 .sense_uproc_interrupt_reg = 0x00214,
125 .sense_uproc_interrupt_reg32 = 0x00214,
126 .set_uproc_interrupt_reg = 0x00214,
127 .set_uproc_interrupt_reg32 = 0x00214,
128 .clr_uproc_interrupt_reg = 0x00218,
129 .clr_uproc_interrupt_reg32 = 0x00218
130 }
131 },
132 { /* Snipe and Scamp */
133 .mailbox = 0x0052C,
134 .max_cmds = 100,
135 .cache_line_size = 0x20,
136 .clear_isr = 1,
137 .iopoll_weight = 0,
138 {
139 .set_interrupt_mask_reg = 0x00288,
140 .clr_interrupt_mask_reg = 0x0028C,
141 .clr_interrupt_mask_reg32 = 0x0028C,
142 .sense_interrupt_mask_reg = 0x00288,
143 .sense_interrupt_mask_reg32 = 0x00288,
144 .clr_interrupt_reg = 0x00284,
145 .clr_interrupt_reg32 = 0x00284,
146 .sense_interrupt_reg = 0x00280,
147 .sense_interrupt_reg32 = 0x00280,
148 .ioarrin_reg = 0x00504,
149 .sense_uproc_interrupt_reg = 0x00290,
150 .sense_uproc_interrupt_reg32 = 0x00290,
151 .set_uproc_interrupt_reg = 0x00290,
152 .set_uproc_interrupt_reg32 = 0x00290,
153 .clr_uproc_interrupt_reg = 0x00294,
154 .clr_uproc_interrupt_reg32 = 0x00294
155 }
156 },
157 { /* CRoC */
158 .mailbox = 0x00044,
159 .max_cmds = 1000,
160 .cache_line_size = 0x20,
161 .clear_isr = 0,
162 .iopoll_weight = 64,
163 {
164 .set_interrupt_mask_reg = 0x00010,
165 .clr_interrupt_mask_reg = 0x00018,
166 .clr_interrupt_mask_reg32 = 0x0001C,
167 .sense_interrupt_mask_reg = 0x00010,
168 .sense_interrupt_mask_reg32 = 0x00014,
169 .clr_interrupt_reg = 0x00008,
170 .clr_interrupt_reg32 = 0x0000C,
171 .sense_interrupt_reg = 0x00000,
172 .sense_interrupt_reg32 = 0x00004,
173 .ioarrin_reg = 0x00070,
174 .sense_uproc_interrupt_reg = 0x00020,
175 .sense_uproc_interrupt_reg32 = 0x00024,
176 .set_uproc_interrupt_reg = 0x00020,
177 .set_uproc_interrupt_reg32 = 0x00024,
178 .clr_uproc_interrupt_reg = 0x00028,
179 .clr_uproc_interrupt_reg32 = 0x0002C,
180 .init_feedback_reg = 0x0005C,
181 .dump_addr_reg = 0x00064,
182 .dump_data_reg = 0x00068,
183 .endian_swap_reg = 0x00084
184 }
185 },
186 };
187
188 static const struct ipr_chip_t ipr_chip[] = {
189 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
190 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
191 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
193 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
194 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
195 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
197 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
198 };
199
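/* Maximum SCSI bus transfer rates, indexed by the max_speed module parameter (0=80MB/s, 1=U160, 2=U320) */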
200 static int ipr_max_bus_speeds[] = {
201 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
202 };
203
204 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
205 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
206 module_param_named(max_speed, ipr_max_speed, uint, 0);
207 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
208 module_param_named(log_level, ipr_log_level, uint, 0);
209 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
210 module_param_named(testmode, ipr_testmode, int, 0);
211 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
212 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
213 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
214 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
215 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
216 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
217 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
218 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
219 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
220 module_param_named(max_devs, ipr_max_devs, int, 0);
221 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
222 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
223 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
224 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
225 module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
226 MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
227 MODULE_LICENSE("GPL");
228 MODULE_VERSION(IPR_DRIVER_VERSION);
229
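/*
 * Illustrative usage (values are examples only): the parameters above can be
 * set at module load time, e.g.
 *
 *	modprobe ipr max_speed=2 log_level=2 number_of_msix=4
 *
 * or made persistent with an "options ipr ..." line under /etc/modprobe.d/.
 */
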
230 /* A constant array of IOASCs/URCs/Error Messages */
231 static const
232 struct ipr_error_table_t ipr_error_table[] = {
233 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
234 "8155: An unknown error was received"},
235 {0x00330000, 0, 0,
236 "Soft underlength error"},
237 {0x005A0000, 0, 0,
238 "Command to be cancelled not found"},
239 {0x00808000, 0, 0,
240 "Qualified success"},
241 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
242 "FFFE: Soft device bus error recovered by the IOA"},
243 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
244 "4101: Soft device bus fabric error"},
245 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
246 "FFFC: Logical block guard error recovered by the device"},
247 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
248 "FFFC: Logical block reference tag error recovered by the device"},
249 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
250 "4171: Recovered scatter list tag / sequence number error"},
251 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
252 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
253 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
254 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
255 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
256 "FFFD: Recovered logical block reference tag error detected by the IOA"},
257 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
258 "FFFD: Logical block guard error recovered by the IOA"},
259 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
260 "FFF9: Device sector reassign successful"},
261 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
262 "FFF7: Media error recovered by device rewrite procedures"},
263 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
264 "7001: IOA sector reassignment successful"},
265 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
266 "FFF9: Soft media error. Sector reassignment recommended"},
267 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
268 "FFF7: Media error recovered by IOA rewrite procedures"},
269 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
270 "FF3D: Soft PCI bus error recovered by the IOA"},
271 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
272 "FFF6: Device hardware error recovered by the IOA"},
273 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
274 "FFF6: Device hardware error recovered by the device"},
275 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
276 "FF3D: Soft IOA error recovered by the IOA"},
277 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
278 "FFFA: Undefined device response recovered by the IOA"},
279 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
280 "FFF6: Device bus error, message or command phase"},
281 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
282 "FFFE: Task Management Function failed"},
283 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
284 "FFF6: Failure prediction threshold exceeded"},
285 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
286 "8009: Impending cache battery pack failure"},
287 {0x02040100, 0, 0,
288 "Logical Unit in process of becoming ready"},
289 {0x02040200, 0, 0,
290 "Initializing command required"},
291 {0x02040400, 0, 0,
292 "34FF: Disk device format in progress"},
293 {0x02040C00, 0, 0,
294 "Logical unit not accessible, target port in unavailable state"},
295 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
296 "9070: IOA requested reset"},
297 {0x023F0000, 0, 0,
298 "Synchronization required"},
299 {0x02408500, 0, 0,
300 "IOA microcode download required"},
301 {0x02408600, 0, 0,
302 "Device bus connection is prohibited by host"},
303 {0x024E0000, 0, 0,
304 "Not ready, IOA shutdown"},
305 {0x025A0000, 0, 0,
306 "Not ready, IOA has been shutdown"},
307 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
308 "3020: Storage subsystem configuration error"},
309 {0x03110B00, 0, 0,
310 "FFF5: Medium error, data unreadable, recommend reassign"},
311 {0x03110C00, 0, 0,
312 "7000: Medium error, data unreadable, do not reassign"},
313 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
314 "FFF3: Disk media format bad"},
315 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
316 "3002: Addressed device failed to respond to selection"},
317 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
318 "3100: Device bus error"},
319 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
320 "3109: IOA timed out a device command"},
321 {0x04088000, 0, 0,
322 "3120: SCSI bus is not operational"},
323 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
324 "4100: Hard device bus fabric error"},
325 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
326 "310C: Logical block guard error detected by the device"},
327 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
328 "310C: Logical block reference tag error detected by the device"},
329 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
330 "4170: Scatter list tag / sequence number error"},
331 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
332 "8150: Logical block CRC error on IOA to Host transfer"},
333 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
334 "4170: Logical block sequence number error on IOA to Host transfer"},
335 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
336 "310D: Logical block reference tag error detected by the IOA"},
337 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
338 "310D: Logical block guard error detected by the IOA"},
339 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
340 "9000: IOA reserved area data check"},
341 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
342 "9001: IOA reserved area invalid data pattern"},
343 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
344 "9002: IOA reserved area LRC error"},
345 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
346 "Hardware Error, IOA metadata access error"},
347 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
348 "102E: Out of alternate sectors for disk storage"},
349 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
350 "FFF4: Data transfer underlength error"},
351 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
352 "FFF4: Data transfer overlength error"},
353 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
354 "3400: Logical unit failure"},
355 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
356 "FFF4: Device microcode is corrupt"},
357 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
358 "8150: PCI bus error"},
359 {0x04430000, 1, 0,
360 "Unsupported device bus message received"},
361 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
362 "FFF4: Disk device problem"},
363 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
364 "8150: Permanent IOA failure"},
365 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
366 "3010: Disk device returned wrong response to IOA"},
367 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
368 "8151: IOA microcode error"},
369 {0x04448500, 0, 0,
370 "Device bus status error"},
371 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
372 "8157: IOA error requiring IOA reset to recover"},
373 {0x04448700, 0, 0,
374 "ATA device status error"},
375 {0x04490000, 0, 0,
376 "Message reject received from the device"},
377 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
378 "8008: A permanent cache battery pack failure occurred"},
379 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
380 "9090: Disk unit has been modified after the last known status"},
381 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
382 "9081: IOA detected device error"},
383 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
384 "9082: IOA detected device error"},
385 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
386 "3110: Device bus error, message or command phase"},
387 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
388 "3110: SAS Command / Task Management Function failed"},
389 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
390 "9091: Incorrect hardware configuration change has been detected"},
391 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
392 "9073: Invalid multi-adapter configuration"},
393 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
394 "4010: Incorrect connection between cascaded expanders"},
395 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
396 "4020: Connections exceed IOA design limits"},
397 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
398 "4030: Incorrect multipath connection"},
399 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
400 "4110: Unsupported enclosure function"},
401 {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
402 "4120: SAS cable VPD cannot be read"},
403 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
404 "FFF4: Command to logical unit failed"},
405 {0x05240000, 1, 0,
406 "Illegal request, invalid request type or request packet"},
407 {0x05250000, 0, 0,
408 "Illegal request, invalid resource handle"},
409 {0x05258000, 0, 0,
410 "Illegal request, commands not allowed to this device"},
411 {0x05258100, 0, 0,
412 "Illegal request, command not allowed to a secondary adapter"},
413 {0x05258200, 0, 0,
414 "Illegal request, command not allowed to a non-optimized resource"},
415 {0x05260000, 0, 0,
416 "Illegal request, invalid field in parameter list"},
417 {0x05260100, 0, 0,
418 "Illegal request, parameter not supported"},
419 {0x05260200, 0, 0,
420 "Illegal request, parameter value invalid"},
421 {0x052C0000, 0, 0,
422 "Illegal request, command sequence error"},
423 {0x052C8000, 1, 0,
424 "Illegal request, dual adapter support not enabled"},
425 {0x052C8100, 1, 0,
426 "Illegal request, another cable connector was physically disabled"},
427 {0x054E8000, 1, 0,
428 "Illegal request, inconsistent group id/group count"},
429 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
430 "9031: Array protection temporarily suspended, protection resuming"},
431 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
432 "9040: Array protection temporarily suspended, protection resuming"},
433 {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
434 "4080: IOA exceeded maximum operating temperature"},
435 {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
436 "4085: Service required"},
437 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
438 "3140: Device bus not ready to ready transition"},
439 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
440 "FFFB: SCSI bus was reset"},
441 {0x06290500, 0, 0,
442 "FFFE: SCSI bus transition to single ended"},
443 {0x06290600, 0, 0,
444 "FFFE: SCSI bus transition to LVD"},
445 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
446 "FFFB: SCSI bus was reset by another initiator"},
447 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
448 "3029: A device replacement has occurred"},
449 {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
450 "4102: Device bus fabric performance degradation"},
451 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
452 "9051: IOA cache data exists for a missing or failed device"},
453 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
454 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
455 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
456 "9025: Disk unit is not supported at its physical location"},
457 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
458 "3020: IOA detected a SCSI bus configuration error"},
459 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
460 "3150: SCSI bus configuration error"},
461 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
462 "9074: Asymmetric advanced function disk configuration"},
463 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
464 "4040: Incomplete multipath connection between IOA and enclosure"},
465 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
466 "4041: Incomplete multipath connection between enclosure and device"},
467 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
468 "9075: Incomplete multipath connection between IOA and remote IOA"},
469 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
470 "9076: Configuration error, missing remote IOA"},
471 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
472 "4050: Enclosure does not support a required multipath function"},
473 {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
474 "4121: Configuration error, required cable is missing"},
475 {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
476 "4122: Cable is not plugged into the correct location on remote IOA"},
477 {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
478 "4123: Configuration error, invalid cable vital product data"},
479 {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
480 "4124: Configuration error, both cable ends are plugged into the same IOA"},
481 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
482 "4070: Logically bad block written on device"},
483 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
484 "9041: Array protection temporarily suspended"},
485 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
486 "9042: Corrupt array parity detected on specified device"},
487 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
488 "9030: Array no longer protected due to missing or failed disk unit"},
489 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
490 "9071: Link operational transition"},
491 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
492 "9072: Link not operational transition"},
493 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
494 "9032: Array exposed but still protected"},
495 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
496 "70DD: Device forced failed by disrupt device command"},
497 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
498 "4061: Multipath redundancy level got better"},
499 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
500 "4060: Multipath redundancy level got worse"},
501 {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
502 "9083: Device raw mode enabled"},
503 {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
504 "9084: Device raw mode disabled"},
505 {0x07270000, 0, 0,
506 "Failure due to other device"},
507 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
508 "9008: IOA does not support functions expected by devices"},
509 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
510 "9010: Cache data associated with attached devices cannot be found"},
511 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
512 "9011: Cache data belongs to devices other than those attached"},
513 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
514 "9020: Array missing 2 or more devices with only 1 device present"},
515 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
516 "9021: Array missing 2 or more devices with 2 or more devices present"},
517 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
518 "9022: Exposed array is missing a required device"},
519 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
520 "9023: Array member(s) not at required physical locations"},
521 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
522 "9024: Array not functional due to present hardware configuration"},
523 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
524 "9026: Array not functional due to present hardware configuration"},
525 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
526 "9027: Array is missing a device and parity is out of sync"},
527 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
528 "9028: Maximum number of arrays already exist"},
529 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
530 "9050: Required cache data cannot be located for a disk unit"},
531 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
532 "9052: Cache data exists for a device that has been modified"},
533 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
534 "9054: IOA resources not available due to previous problems"},
535 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
536 "9092: Disk unit requires initialization before use"},
537 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
538 "9029: Incorrect hardware configuration change has been detected"},
539 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
540 "9060: One or more disk pairs are missing from an array"},
541 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
542 "9061: One or more disks are missing from an array"},
543 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
544 "9062: One or more disks are missing from an array"},
545 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
546 "9063: Maximum number of functional arrays has been exceeded"},
547 {0x07279A00, 0, 0,
548 "Data protect, other volume set problem"},
549 {0x0B260000, 0, 0,
550 "Aborted command, invalid descriptor"},
551 {0x0B3F9000, 0, 0,
552 "Target operating conditions have changed, dual adapter takeover"},
553 {0x0B530200, 0, 0,
554 "Aborted command, medium removal prevented"},
555 {0x0B5A0000, 0, 0,
556 "Command terminated by host"},
557 {0x0B5B8000, 0, 0,
558 "Aborted command, command terminated by host"}
559 };
560
561 static const struct ipr_ses_table_entry ipr_ses_table[] = {
562 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
563 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
564 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
565 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
566 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
567 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
568 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
569 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
570 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
571 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
572 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
573 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
574 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
575 };
576
577 /*
578 * Function Prototypes
579 */
580 static int ipr_reset_alert(struct ipr_cmnd *);
581 static void ipr_process_ccn(struct ipr_cmnd *);
582 static void ipr_process_error(struct ipr_cmnd *);
583 static void ipr_reset_ioa_job(struct ipr_cmnd *);
584 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
585 enum ipr_shutdown_type);
586
587 #ifdef CONFIG_SCSI_IPR_TRACE
588 /**
589 * ipr_trc_hook - Add a trace entry to the driver trace
590 * @ipr_cmd: ipr command struct
591 * @type: trace type
592 * @add_data: additional data
593 *
594 * Return value:
595 * none
596 **/
597 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
598 u8 type, u32 add_data)
599 {
600 struct ipr_trace_entry *trace_entry;
601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
602 unsigned int trace_index;
603
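/* Atomically advance the trace index; the mask wraps it within the circular trace buffer */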
604 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
605 trace_entry = &ioa_cfg->trace[trace_index];
606 trace_entry->time = jiffies;
607 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
608 trace_entry->type = type;
609 if (ipr_cmd->ioa_cfg->sis64)
610 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
611 else
612 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
613 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
614 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
615 trace_entry->u.add_data = add_data;
616 wmb();
617 }
618 #else
619 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
620 #endif
621
622 /**
623 * ipr_lock_and_done - Acquire lock and complete command
624 * @ipr_cmd: ipr command struct
625 *
626 * Return value:
627 * none
628 **/
629 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
630 {
631 unsigned long lock_flags;
632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
633
634 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
635 ipr_cmd->done(ipr_cmd);
636 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
637 }
638
639 /**
640 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
641 * @ipr_cmd: ipr command struct
642 *
643 * Return value:
644 * none
645 **/
646 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
647 {
648 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
649 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
650 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
651 dma_addr_t dma_addr = ipr_cmd->dma_addr;
652 int hrrq_id;
653
654 hrrq_id = ioarcb->cmd_pkt.hrrq_id;
655 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
656 ioarcb->cmd_pkt.hrrq_id = hrrq_id;
657 ioarcb->data_transfer_length = 0;
658 ioarcb->read_data_transfer_length = 0;
659 ioarcb->ioadl_len = 0;
660 ioarcb->read_ioadl_len = 0;
661
662 if (ipr_cmd->ioa_cfg->sis64) {
663 ioarcb->u.sis64_addr_data.data_ioadl_addr =
664 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
665 ioasa64->u.gata.status = 0;
666 } else {
667 ioarcb->write_ioadl_addr =
668 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
669 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
670 ioasa->u.gata.status = 0;
671 }
672
673 ioasa->hdr.ioasc = 0;
674 ioasa->hdr.residual_data_len = 0;
675 ipr_cmd->scsi_cmd = NULL;
676 ipr_cmd->qc = NULL;
677 ipr_cmd->sense_buffer[0] = 0;
678 ipr_cmd->dma_use_sg = 0;
679 }
680
681 /**
682 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
683 * @ipr_cmd: ipr command struct
684 *
685 * Return value:
686 * none
687 **/
688 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
689 void (*fast_done) (struct ipr_cmnd *))
690 {
691 ipr_reinit_ipr_cmnd(ipr_cmd);
692 ipr_cmd->u.scratch = 0;
693 ipr_cmd->sibling = NULL;
694 ipr_cmd->eh_comp = NULL;
695 ipr_cmd->fast_done = fast_done;
696 init_timer(&ipr_cmd->timer);
697 }
698
699 /**
700 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
701 * @hrrq: hrr queue struct
702 *
703 * Return value:
704 * pointer to ipr command struct
705 **/
706 static
707 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
708 {
709 struct ipr_cmnd *ipr_cmd = NULL;
710
711 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
712 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
713 struct ipr_cmnd, queue);
714 list_del(&ipr_cmd->queue);
715 }
716
717
718 return ipr_cmd;
719 }
720
721 /**
722 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
723 * @ioa_cfg: ioa config struct
724 *
725 * Return value:
726 * pointer to ipr command struct
727 **/
728 static
729 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
730 {
731 struct ipr_cmnd *ipr_cmd =
732 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
733 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
734 return ipr_cmd;
735 }
736
737 /**
738 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
739 * @ioa_cfg: ioa config struct
740 * @clr_ints: interrupts to clear
741 *
742 * This function masks all interrupts on the adapter, then clears the
743 * interrupts specified in the mask
744 *
745 * Return value:
746 * none
747 **/
748 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
749 u32 clr_ints)
750 {
751 volatile u32 int_reg;
752 int i;
753
754 /* Stop new interrupts */
755 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
756 spin_lock(&ioa_cfg->hrrq[i]._lock);
757 ioa_cfg->hrrq[i].allow_interrupts = 0;
758 spin_unlock(&ioa_cfg->hrrq[i]._lock);
759 }
760 wmb();
761
762 /* Set interrupt mask to stop all new interrupts */
763 if (ioa_cfg->sis64)
764 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
765 else
766 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
767
768 /* Clear any pending interrupts */
769 if (ioa_cfg->sis64)
770 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
771 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
772 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
773 }
774
775 /**
776 * ipr_save_pcix_cmd_reg - Save PCI-X command register
777 * @ioa_cfg: ioa config struct
778 *
779 * Return value:
780 * 0 on success / -EIO on failure
781 **/
782 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
783 {
784 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
785
786 if (pcix_cmd_reg == 0)
787 return 0;
788
789 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
790 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
791 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
792 return -EIO;
793 }
794
795 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
796 return 0;
797 }
798
799 /**
800 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
801 * @ioa_cfg: ioa config struct
802 *
803 * Return value:
804 * 0 on success / -EIO on failure
805 **/
806 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
807 {
808 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
809
810 if (pcix_cmd_reg) {
811 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
812 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
813 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
814 return -EIO;
815 }
816 }
817
818 return 0;
819 }
820
821 /**
822 * ipr_sata_eh_done - done function for aborted SATA commands
823 * @ipr_cmd: ipr command struct
824 *
825 * This function is invoked for ops generated to SATA
826 * devices which are being aborted.
827 *
828 * Return value:
829 * none
830 **/
831 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
832 {
833 struct ata_queued_cmd *qc = ipr_cmd->qc;
834 struct ipr_sata_port *sata_port = qc->ap->private_data;
835
836 qc->err_mask |= AC_ERR_OTHER;
837 sata_port->ioasa.status |= ATA_BUSY;
838 ata_qc_complete(qc);
839 if (ipr_cmd->eh_comp)
840 complete(ipr_cmd->eh_comp);
841 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
842 }
843
844 /**
845 * ipr_scsi_eh_done - mid-layer done function for aborted ops
846 * @ipr_cmd: ipr command struct
847 *
848 * This function is invoked by the interrupt handler for
849 * ops generated by the SCSI mid-layer which are being aborted.
850 *
851 * Return value:
852 * none
853 **/
854 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
855 {
856 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
857
858 scsi_cmd->result |= (DID_ERROR << 16);
859
860 scsi_dma_unmap(ipr_cmd->scsi_cmd);
861 scsi_cmd->scsi_done(scsi_cmd);
862 if (ipr_cmd->eh_comp)
863 complete(ipr_cmd->eh_comp);
864 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
865 }
866
867 /**
868 * ipr_fail_all_ops - Fails all outstanding ops.
869 * @ioa_cfg: ioa config struct
870 *
871 * This function fails all outstanding ops.
872 *
873 * Return value:
874 * none
875 **/
876 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
877 {
878 struct ipr_cmnd *ipr_cmd, *temp;
879 struct ipr_hrr_queue *hrrq;
880
881 ENTER;
882 for_each_hrrq(hrrq, ioa_cfg) {
883 spin_lock(&hrrq->_lock);
884 list_for_each_entry_safe(ipr_cmd,
885 temp, &hrrq->hrrq_pending_q, queue) {
886 list_del(&ipr_cmd->queue);
887
888 ipr_cmd->s.ioasa.hdr.ioasc =
889 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
890 ipr_cmd->s.ioasa.hdr.ilid =
891 cpu_to_be32(IPR_DRIVER_ILID);
892
893 if (ipr_cmd->scsi_cmd)
894 ipr_cmd->done = ipr_scsi_eh_done;
895 else if (ipr_cmd->qc)
896 ipr_cmd->done = ipr_sata_eh_done;
897
898 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
899 IPR_IOASC_IOA_WAS_RESET);
900 del_timer(&ipr_cmd->timer);
901 ipr_cmd->done(ipr_cmd);
902 }
903 spin_unlock(&hrrq->_lock);
904 }
905 LEAVE;
906 }
907
908 /**
909 * ipr_send_command - Send driver initiated requests.
910 * @ipr_cmd: ipr command struct
911 *
912 * This function sends a command to the adapter using the correct write call.
913 * In the case of sis64, calculate the ioarcb size required. Then or in the
914 * appropriate bits.
915 *
916 * Return value:
917 * none
918 **/
919 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
920 {
921 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
922 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
923
924 if (ioa_cfg->sis64) {
925 /* The default size is 256 bytes */
926 send_dma_addr |= 0x1;
927
928 /* If the number of ioadls * size of ioadl > 128 bytes,
929 then use a 512 byte ioarcb */
930 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
931 send_dma_addr |= 0x4;
932 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
933 } else
934 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
935 }
936
937 /**
938 * ipr_do_req - Send driver initiated requests.
939 * @ipr_cmd: ipr command struct
940 * @done: done function
941 * @timeout_func: timeout function
942 * @timeout: timeout value
943 *
944 * This function sends the specified command to the adapter with the
945 * timeout given. The done function is invoked on command completion.
946 *
947 * Return value:
948 * none
949 **/
950 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
951 void (*done) (struct ipr_cmnd *),
952 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
953 {
954 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
955
956 ipr_cmd->done = done;
957
958 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
959 ipr_cmd->timer.expires = jiffies + timeout;
960 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
961
962 add_timer(&ipr_cmd->timer);
963
964 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
965
966 ipr_send_command(ipr_cmd);
967 }
968
969 /**
970 * ipr_internal_cmd_done - Op done function for an internally generated op.
971 * @ipr_cmd: ipr command struct
972 *
973 * This function is the op done function for an internally generated,
974 * blocking op. It simply wakes the sleeping thread.
975 *
976 * Return value:
977 * none
978 **/
979 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
980 {
981 if (ipr_cmd->sibling)
982 ipr_cmd->sibling = NULL;
983 else
984 complete(&ipr_cmd->completion);
985 }
986
987 /**
988 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
989 * @ipr_cmd: ipr command struct
990 * @dma_addr: dma address
991 * @len: transfer length
992 * @flags: ioadl flag value
993 *
994 * This function initializes an ioadl in the case where there is only a single
995 * descriptor.
996 *
997 * Return value:
998 * nothing
999 **/
1000 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1001 u32 len, int flags)
1002 {
1003 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1004 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1005
1006 ipr_cmd->dma_use_sg = 1;
1007
1008 if (ipr_cmd->ioa_cfg->sis64) {
1009 ioadl64->flags = cpu_to_be32(flags);
1010 ioadl64->data_len = cpu_to_be32(len);
1011 ioadl64->address = cpu_to_be64(dma_addr);
1012
1013 ipr_cmd->ioarcb.ioadl_len =
1014 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1015 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1016 } else {
1017 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1018 ioadl->address = cpu_to_be32(dma_addr);
1019
1020 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1021 ipr_cmd->ioarcb.read_ioadl_len =
1022 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1023 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1024 } else {
1025 ipr_cmd->ioarcb.ioadl_len =
1026 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1027 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1028 }
1029 }
1030 }
1031
1032 /**
1033 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1034 * @ipr_cmd: ipr command struct
1035 * @timeout_func: function to invoke if command times out
1036 * @timeout: timeout
1037 *
1038 * Return value:
1039 * none
1040 **/
1041 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1042 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
1043 u32 timeout)
1044 {
1045 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1046
1047 init_completion(&ipr_cmd->completion);
1048 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1049
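/* The host lock cannot be held while sleeping; release it so the interrupt handler can complete the command */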
1050 spin_unlock_irq(ioa_cfg->host->host_lock);
1051 wait_for_completion(&ipr_cmd->completion);
1052 spin_lock_irq(ioa_cfg->host->host_lock);
1053 }
1054
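/**
 * ipr_get_hrrq_index - Pick an HRR queue for a new host command
 * @ioa_cfg: ioa config struct
 *
 * When multiple HRR queues are configured, host commands are spread
 * round-robin across queues 1 .. hrrq_num-1; driver-internal commands
 * are issued on the IPR_INIT_HRRQ queue (see ipr_get_free_ipr_cmnd).
 *
 * Return value:
 * 	selected hrrq index
 **/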
1055 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1056 {
1057 unsigned int hrrq;
1058
1059 if (ioa_cfg->hrrq_num == 1)
1060 hrrq = 0;
1061 else {
1062 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1063 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1064 }
1065 return hrrq;
1066 }
1067
1068 /**
1069 * ipr_send_hcam - Send an HCAM to the adapter.
1070 * @ioa_cfg: ioa config struct
1071 * @type: HCAM type
1072 * @hostrcb: hostrcb struct
1073 *
1074 * This function will send a Host Controlled Async command to the adapter.
1075 * If HCAMs are currently not allowed to be issued to the adapter, it will
1076 * place the hostrcb on the free queue.
1077 *
1078 * Return value:
1079 * none
1080 **/
1081 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1082 struct ipr_hostrcb *hostrcb)
1083 {
1084 struct ipr_cmnd *ipr_cmd;
1085 struct ipr_ioarcb *ioarcb;
1086
1087 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1088 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1089 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1090 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1091
1092 ipr_cmd->u.hostrcb = hostrcb;
1093 ioarcb = &ipr_cmd->ioarcb;
1094
1095 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1096 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1097 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1098 ioarcb->cmd_pkt.cdb[1] = type;
1099 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1100 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1101
1102 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1103 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1104
1105 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1106 ipr_cmd->done = ipr_process_ccn;
1107 else
1108 ipr_cmd->done = ipr_process_error;
1109
1110 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1111
1112 ipr_send_command(ipr_cmd);
1113 } else {
1114 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1115 }
1116 }
1117
1118 /**
1119 * ipr_update_ata_class - Update the ata class in the resource entry
1120 * @res: resource entry struct
1121 * @proto: cfgte device bus protocol value
1122 *
1123 * Return value:
1124 * none
1125 **/
1126 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1127 {
1128 switch (proto) {
1129 case IPR_PROTO_SATA:
1130 case IPR_PROTO_SAS_STP:
1131 res->ata_class = ATA_DEV_ATA;
1132 break;
1133 case IPR_PROTO_SATA_ATAPI:
1134 case IPR_PROTO_SAS_STP_ATAPI:
1135 res->ata_class = ATA_DEV_ATAPI;
1136 break;
1137 default:
1138 res->ata_class = ATA_DEV_UNKNOWN;
1139 break;
1140 }
1141 }
1142
1143 /**
1144 * ipr_init_res_entry - Initialize a resource entry struct.
1145 * @res: resource entry struct
1146 * @cfgtew: config table entry wrapper struct
1147 *
1148 * Return value:
1149 * none
1150 **/
1151 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1152 struct ipr_config_table_entry_wrapper *cfgtew)
1153 {
1154 int found = 0;
1155 unsigned int proto;
1156 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1157 struct ipr_resource_entry *gscsi_res = NULL;
1158
1159 res->needs_sync_complete = 0;
1160 res->in_erp = 0;
1161 res->add_to_ml = 0;
1162 res->del_from_ml = 0;
1163 res->resetting_device = 0;
1164 res->reset_occurred = 0;
1165 res->sdev = NULL;
1166 res->sata_port = NULL;
1167
1168 if (ioa_cfg->sis64) {
1169 proto = cfgtew->u.cfgte64->proto;
1170 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1171 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1172 res->qmodel = IPR_QUEUEING_MODEL64(res);
1173 res->type = cfgtew->u.cfgte64->res_type;
1174
1175 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1176 sizeof(res->res_path));
1177
1178 res->bus = 0;
1179 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1180 sizeof(res->dev_lun.scsi_lun));
1181 res->lun = scsilun_to_int(&res->dev_lun);
1182
1183 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1184 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1185 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1186 found = 1;
1187 res->target = gscsi_res->target;
1188 break;
1189 }
1190 }
1191 if (!found) {
1192 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1193 ioa_cfg->max_devs_supported);
1194 set_bit(res->target, ioa_cfg->target_ids);
1195 }
1196 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1197 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1198 res->target = 0;
1199 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1200 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1201 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1202 ioa_cfg->max_devs_supported);
1203 set_bit(res->target, ioa_cfg->array_ids);
1204 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1205 res->bus = IPR_VSET_VIRTUAL_BUS;
1206 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1207 ioa_cfg->max_devs_supported);
1208 set_bit(res->target, ioa_cfg->vset_ids);
1209 } else {
1210 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1211 ioa_cfg->max_devs_supported);
1212 set_bit(res->target, ioa_cfg->target_ids);
1213 }
1214 } else {
1215 proto = cfgtew->u.cfgte->proto;
1216 res->qmodel = IPR_QUEUEING_MODEL(res);
1217 res->flags = cfgtew->u.cfgte->flags;
1218 if (res->flags & IPR_IS_IOA_RESOURCE)
1219 res->type = IPR_RES_TYPE_IOAFP;
1220 else
1221 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1222
1223 res->bus = cfgtew->u.cfgte->res_addr.bus;
1224 res->target = cfgtew->u.cfgte->res_addr.target;
1225 res->lun = cfgtew->u.cfgte->res_addr.lun;
1226 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1227 }
1228
1229 ipr_update_ata_class(res, proto);
1230 }
1231
1232 /**
1233 * ipr_is_same_device - Determine if two devices are the same.
1234 * @res: resource entry struct
1235 * @cfgtew: config table entry wrapper struct
1236 *
1237 * Return value:
1238 * 1 if the devices are the same / 0 otherwise
1239 **/
1240 static int ipr_is_same_device(struct ipr_resource_entry *res,
1241 struct ipr_config_table_entry_wrapper *cfgtew)
1242 {
1243 if (res->ioa_cfg->sis64) {
1244 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1245 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1246 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1247 sizeof(cfgtew->u.cfgte64->lun))) {
1248 return 1;
1249 }
1250 } else {
1251 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1252 res->target == cfgtew->u.cfgte->res_addr.target &&
1253 res->lun == cfgtew->u.cfgte->res_addr.lun)
1254 return 1;
1255 }
1256
1257 return 0;
1258 }
1259
1260 /**
1261 * __ipr_format_res_path - Format the resource path for printing.
1262 * @res_path: resource path
1263 * @buf: buffer
1264 * @len: length of buffer provided
1265 *
1266 * Return value:
1267 * pointer to buffer
1268 **/
1269 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1270 {
1271 int i;
1272 char *p = buffer;
1273
1274 *p = '\0';
1275 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1276 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1277 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
1278
1279 return buffer;
1280 }
1281
1282 /**
1283 * ipr_format_res_path - Format the resource path for printing.
1284 * @ioa_cfg: ioa config struct
1285 * @res_path: resource path
1286 * @buf: buffer
1287 * @len: length of buffer provided
1288 *
1289 * Return value:
1290 * pointer to buffer
1291 **/
1292 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1293 u8 *res_path, char *buffer, int len)
1294 {
1295 char *p = buffer;
1296
1297 *p = '\0';
1298 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1299 __ipr_format_res_path(res_path, p, len - (p - buffer));
1300 return buffer;
1301 }
1302
1303 /**
1304 * ipr_update_res_entry - Update the resource entry.
1305 * @res: resource entry struct
1306 * @cfgtew: config table entry wrapper struct
1307 *
1308 * Return value:
1309 * none
1310 **/
1311 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1312 struct ipr_config_table_entry_wrapper *cfgtew)
1313 {
1314 char buffer[IPR_MAX_RES_PATH_LENGTH];
1315 unsigned int proto;
1316 int new_path = 0;
1317
1318 if (res->ioa_cfg->sis64) {
1319 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1320 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1321 res->type = cfgtew->u.cfgte64->res_type;
1322
1323 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1324 sizeof(struct ipr_std_inq_data));
1325
1326 res->qmodel = IPR_QUEUEING_MODEL64(res);
1327 proto = cfgtew->u.cfgte64->proto;
1328 res->res_handle = cfgtew->u.cfgte64->res_handle;
1329 res->dev_id = cfgtew->u.cfgte64->dev_id;
1330
1331 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1332 sizeof(res->dev_lun.scsi_lun));
1333
1334 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1335 sizeof(res->res_path))) {
1336 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1337 sizeof(res->res_path));
1338 new_path = 1;
1339 }
1340
1341 if (res->sdev && new_path)
1342 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1343 ipr_format_res_path(res->ioa_cfg,
1344 res->res_path, buffer, sizeof(buffer)));
1345 } else {
1346 res->flags = cfgtew->u.cfgte->flags;
1347 if (res->flags & IPR_IS_IOA_RESOURCE)
1348 res->type = IPR_RES_TYPE_IOAFP;
1349 else
1350 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1351
1352 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1353 sizeof(struct ipr_std_inq_data));
1354
1355 res->qmodel = IPR_QUEUEING_MODEL(res);
1356 proto = cfgtew->u.cfgte->proto;
1357 res->res_handle = cfgtew->u.cfgte->res_handle;
1358 }
1359
1360 ipr_update_ata_class(res, proto);
1361 }
1362
1363 /**
1364 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1365 * for the resource.
1366 * @res: resource entry struct
1368 *
1369 * Return value:
1370 * none
1371 **/
1372 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1373 {
1374 struct ipr_resource_entry *gscsi_res = NULL;
1375 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1376
1377 if (!ioa_cfg->sis64)
1378 return;
1379
1380 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1381 clear_bit(res->target, ioa_cfg->array_ids);
1382 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1383 clear_bit(res->target, ioa_cfg->vset_ids);
1384 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1385 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1386 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1387 return;
1388 clear_bit(res->target, ioa_cfg->target_ids);
1389
1390 } else if (res->bus == 0)
1391 clear_bit(res->target, ioa_cfg->target_ids);
1392 }
1393
1394 /**
1395 * ipr_handle_config_change - Handle a config change from the adapter
1396 * @ioa_cfg: ioa config struct
1397 * @hostrcb: hostrcb
1398 *
1399 * Return value:
1400 * none
1401 **/
1402 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1403 struct ipr_hostrcb *hostrcb)
1404 {
1405 struct ipr_resource_entry *res = NULL;
1406 struct ipr_config_table_entry_wrapper cfgtew;
1407 __be32 cc_res_handle;
1408
1409 u32 is_ndn = 1;
1410
1411 if (ioa_cfg->sis64) {
1412 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1413 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1414 } else {
1415 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1416 cc_res_handle = cfgtew.u.cfgte->res_handle;
1417 }
1418
1419 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1420 if (res->res_handle == cc_res_handle) {
1421 is_ndn = 0;
1422 break;
1423 }
1424 }
1425
1426 if (is_ndn) {
1427 if (list_empty(&ioa_cfg->free_res_q)) {
1428 ipr_send_hcam(ioa_cfg,
1429 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1430 hostrcb);
1431 return;
1432 }
1433
1434 res = list_entry(ioa_cfg->free_res_q.next,
1435 struct ipr_resource_entry, queue);
1436
1437 list_del(&res->queue);
1438 ipr_init_res_entry(res, &cfgtew);
1439 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1440 }
1441
1442 ipr_update_res_entry(res, &cfgtew);
1443
1444 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1445 if (res->sdev) {
1446 res->del_from_ml = 1;
1447 res->res_handle = IPR_INVALID_RES_HANDLE;
1448 schedule_work(&ioa_cfg->work_q);
1449 } else {
1450 ipr_clear_res_target(res);
1451 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1452 }
1453 } else if (!res->sdev || res->del_from_ml) {
1454 res->add_to_ml = 1;
1455 schedule_work(&ioa_cfg->work_q);
1456 }
1457
1458 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1459 }
1460
1461 /**
1462 * ipr_process_ccn - Op done function for a CCN.
1463 * @ipr_cmd: ipr command struct
1464 *
1465 * This function is the op done function for a configuration
1466 * change notification host controlled async from the adapter.
1467 *
1468 * Return value:
1469 * none
1470 **/
1471 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1472 {
1473 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1474 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1475 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1476
1477 list_del(&hostrcb->queue);
1478 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1479
1480 if (ioasc) {
1481 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1482 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1483 dev_err(&ioa_cfg->pdev->dev,
1484 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1485
1486 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1487 } else {
1488 ipr_handle_config_change(ioa_cfg, hostrcb);
1489 }
1490 }
1491
1492 /**
1493 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1494 * @i: index into buffer
1495 * @buf: string to modify
1496 *
1497 * This function will strip all trailing whitespace, pad the end
1498 * of the string with a single space, and null-terminate the string.
1499 *
1500 * Return value:
1501 * new length of string
1502 **/
1503 static int strip_and_pad_whitespace(int i, char *buf)
1504 {
1505 while (i && buf[i] == ' ')
1506 i--;
1507 buf[i+1] = ' ';
1508 buf[i+2] = '\0';
1509 return i + 2;
1510 }
1511
1512 /**
1513 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1514 * @prefix: string to print at start of printk
1515 * @hostrcb: hostrcb pointer
1516 * @vpd: vendor/product id/sn struct
1517 *
1518 * Return value:
1519 * none
1520 **/
1521 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1522 struct ipr_vpd *vpd)
1523 {
1524 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1525 int i = 0;
1526
1527 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1528 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1529
1530 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1531 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1532
1533 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1534 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1535
1536 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1537 }
1538
1539 /**
1540 * ipr_log_vpd - Log the passed VPD to the error log.
1541 * @vpd: vendor/product id/sn struct
1542 *
1543 * Return value:
1544 * none
1545 **/
1546 static void ipr_log_vpd(struct ipr_vpd *vpd)
1547 {
1548 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1549 + IPR_SERIAL_NUM_LEN];
1550
1551 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1552 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1553 IPR_PROD_ID_LEN);
1554 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1555 ipr_err("Vendor/Product ID: %s\n", buffer);
1556
1557 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1558 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1559 ipr_err(" Serial Number: %s\n", buffer);
1560 }
1561
1562 /**
1563 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1564 * @prefix: string to print at start of printk
1565 * @hostrcb: hostrcb pointer
1566 * @vpd: vendor/product id/sn/wwn struct
1567 *
1568 * Return value:
1569 * none
1570 **/
1571 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1572 struct ipr_ext_vpd *vpd)
1573 {
1574 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1575 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1576 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1577 }
1578
1579 /**
1580 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1581 * @vpd: vendor/product id/sn/wwn struct
1582 *
1583 * Return value:
1584 * none
1585 **/
1586 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1587 {
1588 ipr_log_vpd(&vpd->vpd);
1589 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1590 be32_to_cpu(vpd->wwid[1]));
1591 }
1592
1593 /**
1594 * ipr_log_enhanced_cache_error - Log a cache error.
1595 * @ioa_cfg: ioa config struct
1596 * @hostrcb: hostrcb struct
1597 *
1598 * Return value:
1599 * none
1600 **/
1601 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1602 struct ipr_hostrcb *hostrcb)
1603 {
1604 struct ipr_hostrcb_type_12_error *error;
1605
1606 if (ioa_cfg->sis64)
1607 error = &hostrcb->hcam.u.error64.u.type_12_error;
1608 else
1609 error = &hostrcb->hcam.u.error.u.type_12_error;
1610
1611 ipr_err("-----Current Configuration-----\n");
1612 ipr_err("Cache Directory Card Information:\n");
1613 ipr_log_ext_vpd(&error->ioa_vpd);
1614 ipr_err("Adapter Card Information:\n");
1615 ipr_log_ext_vpd(&error->cfc_vpd);
1616
1617 ipr_err("-----Expected Configuration-----\n");
1618 ipr_err("Cache Directory Card Information:\n");
1619 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1620 ipr_err("Adapter Card Information:\n");
1621 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1622
1623 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1624 be32_to_cpu(error->ioa_data[0]),
1625 be32_to_cpu(error->ioa_data[1]),
1626 be32_to_cpu(error->ioa_data[2]));
1627 }
1628
1629 /**
1630 * ipr_log_cache_error - Log a cache error.
1631 * @ioa_cfg: ioa config struct
1632 * @hostrcb: hostrcb struct
1633 *
1634 * Return value:
1635 * none
1636 **/
1637 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1638 struct ipr_hostrcb *hostrcb)
1639 {
1640 struct ipr_hostrcb_type_02_error *error =
1641 &hostrcb->hcam.u.error.u.type_02_error;
1642
1643 ipr_err("-----Current Configuration-----\n");
1644 ipr_err("Cache Directory Card Information:\n");
1645 ipr_log_vpd(&error->ioa_vpd);
1646 ipr_err("Adapter Card Information:\n");
1647 ipr_log_vpd(&error->cfc_vpd);
1648
1649 ipr_err("-----Expected Configuration-----\n");
1650 ipr_err("Cache Directory Card Information:\n");
1651 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1652 ipr_err("Adapter Card Information:\n");
1653 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1654
1655 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1656 be32_to_cpu(error->ioa_data[0]),
1657 be32_to_cpu(error->ioa_data[1]),
1658 be32_to_cpu(error->ioa_data[2]));
1659 }
1660
1661 /**
1662 * ipr_log_enhanced_config_error - Log a configuration error.
1663 * @ioa_cfg: ioa config struct
1664 * @hostrcb: hostrcb struct
1665 *
1666 * Return value:
1667 * none
1668 **/
1669 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1670 struct ipr_hostrcb *hostrcb)
1671 {
1672 int errors_logged, i;
1673 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1674 struct ipr_hostrcb_type_13_error *error;
1675
1676 error = &hostrcb->hcam.u.error.u.type_13_error;
1677 errors_logged = be32_to_cpu(error->errors_logged);
1678
1679 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1680 be32_to_cpu(error->errors_detected), errors_logged);
1681
1682 dev_entry = error->dev;
1683
1684 for (i = 0; i < errors_logged; i++, dev_entry++) {
1685 ipr_err_separator;
1686
1687 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1688 ipr_log_ext_vpd(&dev_entry->vpd);
1689
1690 ipr_err("-----New Device Information-----\n");
1691 ipr_log_ext_vpd(&dev_entry->new_vpd);
1692
1693 ipr_err("Cache Directory Card Information:\n");
1694 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1695
1696 ipr_err("Adapter Card Information:\n");
1697 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1698 }
1699 }
1700
1701 /**
1702 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1703 * @ioa_cfg: ioa config struct
1704 * @hostrcb: hostrcb struct
1705 *
1706 * Return value:
1707 * none
1708 **/
1709 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1710 struct ipr_hostrcb *hostrcb)
1711 {
1712 int errors_logged, i;
1713 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1714 struct ipr_hostrcb_type_23_error *error;
1715 char buffer[IPR_MAX_RES_PATH_LENGTH];
1716
1717 error = &hostrcb->hcam.u.error64.u.type_23_error;
1718 errors_logged = be32_to_cpu(error->errors_logged);
1719
1720 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721 be32_to_cpu(error->errors_detected), errors_logged);
1722
1723 dev_entry = error->dev;
1724
1725 for (i = 0; i < errors_logged; i++, dev_entry++) {
1726 ipr_err_separator;
1727
1728 ipr_err("Device %d : %s", i + 1,
1729 __ipr_format_res_path(dev_entry->res_path,
1730 buffer, sizeof(buffer)));
1731 ipr_log_ext_vpd(&dev_entry->vpd);
1732
1733 ipr_err("-----New Device Information-----\n");
1734 ipr_log_ext_vpd(&dev_entry->new_vpd);
1735
1736 ipr_err("Cache Directory Card Information:\n");
1737 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1738
1739 ipr_err("Adapter Card Information:\n");
1740 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1741 }
1742 }
1743
1744 /**
1745 * ipr_log_config_error - Log a configuration error.
1746 * @ioa_cfg: ioa config struct
1747 * @hostrcb: hostrcb struct
1748 *
1749 * Return value:
1750 * none
1751 **/
1752 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1753 struct ipr_hostrcb *hostrcb)
1754 {
1755 int errors_logged, i;
1756 struct ipr_hostrcb_device_data_entry *dev_entry;
1757 struct ipr_hostrcb_type_03_error *error;
1758
1759 error = &hostrcb->hcam.u.error.u.type_03_error;
1760 errors_logged = be32_to_cpu(error->errors_logged);
1761
1762 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763 be32_to_cpu(error->errors_detected), errors_logged);
1764
1765 dev_entry = error->dev;
1766
1767 for (i = 0; i < errors_logged; i++, dev_entry++) {
1768 ipr_err_separator;
1769
1770 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1771 ipr_log_vpd(&dev_entry->vpd);
1772
1773 ipr_err("-----New Device Information-----\n");
1774 ipr_log_vpd(&dev_entry->new_vpd);
1775
1776 ipr_err("Cache Directory Card Information:\n");
1777 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1778
1779 ipr_err("Adapter Card Information:\n");
1780 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1781
1782 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1783 be32_to_cpu(dev_entry->ioa_data[0]),
1784 be32_to_cpu(dev_entry->ioa_data[1]),
1785 be32_to_cpu(dev_entry->ioa_data[2]),
1786 be32_to_cpu(dev_entry->ioa_data[3]),
1787 be32_to_cpu(dev_entry->ioa_data[4]));
1788 }
1789 }
1790
1791 /**
1792 * ipr_log_enhanced_array_error - Log an array configuration error.
1793 * @ioa_cfg: ioa config struct
1794 * @hostrcb: hostrcb struct
1795 *
1796 * Return value:
1797 * none
1798 **/
1799 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1800 struct ipr_hostrcb *hostrcb)
1801 {
1802 int i, num_entries;
1803 struct ipr_hostrcb_type_14_error *error;
1804 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1805 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1806
1807 error = &hostrcb->hcam.u.error.u.type_14_error;
1808
1809 ipr_err_separator;
1810
1811 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1812 error->protection_level,
1813 ioa_cfg->host->host_no,
1814 error->last_func_vset_res_addr.bus,
1815 error->last_func_vset_res_addr.target,
1816 error->last_func_vset_res_addr.lun);
1817
1818 ipr_err_separator;
1819
1820 array_entry = error->array_member;
1821 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1822 ARRAY_SIZE(error->array_member));
1823
1824 for (i = 0; i < num_entries; i++, array_entry++) {
1825 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1826 continue;
1827
1828 if (be32_to_cpu(error->exposed_mode_adn) == i)
1829 ipr_err("Exposed Array Member %d:\n", i);
1830 else
1831 ipr_err("Array Member %d:\n", i);
1832
1833 ipr_log_ext_vpd(&array_entry->vpd);
1834 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1835 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1836 "Expected Location");
1837
1838 ipr_err_separator;
1839 }
1840 }
1841
1842 /**
1843 * ipr_log_array_error - Log an array configuration error.
1844 * @ioa_cfg: ioa config struct
1845 * @hostrcb: hostrcb struct
1846 *
1847 * Return value:
1848 * none
1849 **/
1850 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1851 struct ipr_hostrcb *hostrcb)
1852 {
1853 int i;
1854 struct ipr_hostrcb_type_04_error *error;
1855 struct ipr_hostrcb_array_data_entry *array_entry;
1856 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1857
1858 error = &hostrcb->hcam.u.error.u.type_04_error;
1859
1860 ipr_err_separator;
1861
1862 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1863 error->protection_level,
1864 ioa_cfg->host->host_no,
1865 error->last_func_vset_res_addr.bus,
1866 error->last_func_vset_res_addr.target,
1867 error->last_func_vset_res_addr.lun);
1868
1869 ipr_err_separator;
1870
1871 array_entry = error->array_member;
1872
1873 for (i = 0; i < 18; i++) {
1874 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1875 continue;
1876
1877 if (be32_to_cpu(error->exposed_mode_adn) == i)
1878 ipr_err("Exposed Array Member %d:\n", i);
1879 else
1880 ipr_err("Array Member %d:\n", i);
1881
1882 ipr_log_vpd(&array_entry->vpd);
1883
1884 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1885 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1886 "Expected Location");
1887
1888 ipr_err_separator;
1889
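		/* The first ten members are stored in array_member[];
		 * members 10-17 continue in array_member2[] */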
1890 if (i == 9)
1891 array_entry = error->array_member2;
1892 else
1893 array_entry++;
1894 }
1895 }
1896
1897 /**
1898 * ipr_log_hex_data - Log additional hex IOA error data.
1899 * @ioa_cfg: ioa config struct
1900 * @data: IOA error data
1901 * @len: data length
1902 *
1903 * Return value:
1904 * none
1905 **/
1906 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1907 {
1908 int i;
1909
1910 if (len == 0)
1911 return;
1912
1913 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1914 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1915
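	/* Print four 32-bit words per row, prefixed with the byte offset of the row */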
1916 for (i = 0; i < len / 4; i += 4) {
1917 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1918 be32_to_cpu(data[i]),
1919 be32_to_cpu(data[i+1]),
1920 be32_to_cpu(data[i+2]),
1921 be32_to_cpu(data[i+3]));
1922 }
1923 }
1924
1925 /**
1926 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1927 * @ioa_cfg: ioa config struct
1928 * @hostrcb: hostrcb struct
1929 *
1930 * Return value:
1931 * none
1932 **/
1933 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1934 struct ipr_hostrcb *hostrcb)
1935 {
1936 struct ipr_hostrcb_type_17_error *error;
1937
1938 if (ioa_cfg->sis64)
1939 error = &hostrcb->hcam.u.error64.u.type_17_error;
1940 else
1941 error = &hostrcb->hcam.u.error.u.type_17_error;
1942
1943 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1944 strim(error->failure_reason);
1945
1946 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1947 be32_to_cpu(hostrcb->hcam.u.error.prc));
1948 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1949 ipr_log_hex_data(ioa_cfg, error->data,
1950 be32_to_cpu(hostrcb->hcam.length) -
1951 (offsetof(struct ipr_hostrcb_error, u) +
1952 offsetof(struct ipr_hostrcb_type_17_error, data)));
1953 }
1954
1955 /**
1956 * ipr_log_dual_ioa_error - Log a dual adapter error.
1957 * @ioa_cfg: ioa config struct
1958 * @hostrcb: hostrcb struct
1959 *
1960 * Return value:
1961 * none
1962 **/
1963 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1964 struct ipr_hostrcb *hostrcb)
1965 {
1966 struct ipr_hostrcb_type_07_error *error;
1967
1968 error = &hostrcb->hcam.u.error.u.type_07_error;
1969 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1970 strim(error->failure_reason);
1971
1972 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1973 be32_to_cpu(hostrcb->hcam.u.error.prc));
1974 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1975 ipr_log_hex_data(ioa_cfg, error->data,
1976 be32_to_cpu(hostrcb->hcam.length) -
1977 (offsetof(struct ipr_hostrcb_error, u) +
1978 offsetof(struct ipr_hostrcb_type_07_error, data)));
1979 }
1980
1981 static const struct {
1982 u8 active;
1983 char *desc;
1984 } path_active_desc[] = {
1985 { IPR_PATH_NO_INFO, "Path" },
1986 { IPR_PATH_ACTIVE, "Active path" },
1987 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1988 };
1989
1990 static const struct {
1991 u8 state;
1992 char *desc;
1993 } path_state_desc[] = {
1994 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1995 { IPR_PATH_HEALTHY, "is healthy" },
1996 { IPR_PATH_DEGRADED, "is degraded" },
1997 { IPR_PATH_FAILED, "is failed" }
1998 };
1999
2000 /**
2001 * ipr_log_fabric_path - Log a fabric path error
2002 * @hostrcb: hostrcb struct
2003 * @fabric: fabric descriptor
2004 *
2005 * Return value:
2006 * none
2007 **/
2008 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2009 struct ipr_hostrcb_fabric_desc *fabric)
2010 {
2011 int i, j;
2012 u8 path_state = fabric->path_state;
2013 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2014 u8 state = path_state & IPR_PATH_STATE_MASK;
2015
2016 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2017 if (path_active_desc[i].active != active)
2018 continue;
2019
2020 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2021 if (path_state_desc[j].state != state)
2022 continue;
2023
2024 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2025 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2026 path_active_desc[i].desc, path_state_desc[j].desc,
2027 fabric->ioa_port);
2028 } else if (fabric->cascaded_expander == 0xff) {
2029 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2030 path_active_desc[i].desc, path_state_desc[j].desc,
2031 fabric->ioa_port, fabric->phy);
2032 } else if (fabric->phy == 0xff) {
2033 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2034 path_active_desc[i].desc, path_state_desc[j].desc,
2035 fabric->ioa_port, fabric->cascaded_expander);
2036 } else {
2037 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2038 path_active_desc[i].desc, path_state_desc[j].desc,
2039 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2040 }
2041 return;
2042 }
2043 }
2044
2045 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2046 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2047 }
2048
2049 /**
2050 * ipr_log64_fabric_path - Log a fabric path error
2051 * @hostrcb: hostrcb struct
2052 * @fabric: fabric descriptor
2053 *
2054 * Return value:
2055 * none
2056 **/
2057 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2058 struct ipr_hostrcb64_fabric_desc *fabric)
2059 {
2060 int i, j;
2061 u8 path_state = fabric->path_state;
2062 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2063 u8 state = path_state & IPR_PATH_STATE_MASK;
2064 char buffer[IPR_MAX_RES_PATH_LENGTH];
2065
2066 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2067 if (path_active_desc[i].active != active)
2068 continue;
2069
2070 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2071 if (path_state_desc[j].state != state)
2072 continue;
2073
2074 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2075 path_active_desc[i].desc, path_state_desc[j].desc,
2076 ipr_format_res_path(hostrcb->ioa_cfg,
2077 fabric->res_path,
2078 buffer, sizeof(buffer)));
2079 return;
2080 }
2081 }
2082
2083 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2084 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2085 buffer, sizeof(buffer)));
2086 }
2087
2088 static const struct {
2089 u8 type;
2090 char *desc;
2091 } path_type_desc[] = {
2092 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2093 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2094 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2095 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2096 };
2097
2098 static const struct {
2099 u8 status;
2100 char *desc;
2101 } path_status_desc[] = {
2102 { IPR_PATH_CFG_NO_PROB, "Functional" },
2103 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2104 { IPR_PATH_CFG_FAILED, "Failed" },
2105 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2106 { IPR_PATH_NOT_DETECTED, "Missing" },
2107 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2108 };
2109
2110 static const char *link_rate[] = {
2111 "unknown",
2112 "disabled",
2113 "phy reset problem",
2114 "spinup hold",
2115 "port selector",
2116 "unknown",
2117 "unknown",
2118 "unknown",
2119 "1.5Gbps",
2120 "3.0Gbps",
2121 "unknown",
2122 "unknown",
2123 "unknown",
2124 "unknown",
2125 "unknown",
2126 "unknown"
2127 };
2128
2129 /**
2130 * ipr_log_path_elem - Log a fabric path element.
2131 * @hostrcb: hostrcb struct
2132 * @cfg: fabric path element struct
2133 *
2134 * Return value:
2135 * none
2136 **/
2137 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2138 struct ipr_hostrcb_config_element *cfg)
2139 {
2140 int i, j;
2141 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2142 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2143
2144 if (type == IPR_PATH_CFG_NOT_EXIST)
2145 return;
2146
2147 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2148 if (path_type_desc[i].type != type)
2149 continue;
2150
2151 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2152 if (path_status_desc[j].status != status)
2153 continue;
2154
2155 if (type == IPR_PATH_CFG_IOA_PORT) {
2156 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2157 path_status_desc[j].desc, path_type_desc[i].desc,
2158 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2159 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2160 } else {
2161 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2162 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2163 path_status_desc[j].desc, path_type_desc[i].desc,
2164 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2165 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2166 } else if (cfg->cascaded_expander == 0xff) {
2167 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2168 "WWN=%08X%08X\n", path_status_desc[j].desc,
2169 path_type_desc[i].desc, cfg->phy,
2170 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2171 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2172 } else if (cfg->phy == 0xff) {
2173 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2174 "WWN=%08X%08X\n", path_status_desc[j].desc,
2175 path_type_desc[i].desc, cfg->cascaded_expander,
2176 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2177 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2178 } else {
2179 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2180 "WWN=%08X%08X\n", path_status_desc[j].desc,
2181 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2182 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2183 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2184 }
2185 }
2186 return;
2187 }
2188 }
2189
2190 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2191 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2192 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2193 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2194 }
2195
2196 /**
2197 * ipr_log64_path_elem - Log a fabric path element.
2198 * @hostrcb: hostrcb struct
2199 * @cfg: fabric path element struct
2200 *
2201 * Return value:
2202 * none
2203 **/
2204 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2205 struct ipr_hostrcb64_config_element *cfg)
2206 {
2207 int i, j;
2208 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2209 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2210 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2211 char buffer[IPR_MAX_RES_PATH_LENGTH];
2212
2213 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2214 return;
2215
2216 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2217 if (path_type_desc[i].type != type)
2218 continue;
2219
2220 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2221 if (path_status_desc[j].status != status)
2222 continue;
2223
2224 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2225 path_status_desc[j].desc, path_type_desc[i].desc,
2226 ipr_format_res_path(hostrcb->ioa_cfg,
2227 cfg->res_path, buffer, sizeof(buffer)),
2228 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2229 be32_to_cpu(cfg->wwid[0]),
2230 be32_to_cpu(cfg->wwid[1]));
2231 return;
2232 }
2233 }
2234 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2235 "WWN=%08X%08X\n", cfg->type_status,
2236 ipr_format_res_path(hostrcb->ioa_cfg,
2237 cfg->res_path, buffer, sizeof(buffer)),
2238 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2239 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2240 }
2241
2242 /**
2243 * ipr_log_fabric_error - Log a fabric error.
2244 * @ioa_cfg: ioa config struct
2245 * @hostrcb: hostrcb struct
2246 *
2247 * Return value:
2248 * none
2249 **/
2250 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2251 struct ipr_hostrcb *hostrcb)
2252 {
2253 struct ipr_hostrcb_type_20_error *error;
2254 struct ipr_hostrcb_fabric_desc *fabric;
2255 struct ipr_hostrcb_config_element *cfg;
2256 int i, add_len;
2257
2258 error = &hostrcb->hcam.u.error.u.type_20_error;
2259 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2260 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2261
2262 add_len = be32_to_cpu(hostrcb->hcam.length) -
2263 (offsetof(struct ipr_hostrcb_error, u) +
2264 offsetof(struct ipr_hostrcb_type_20_error, desc));
2265
2266 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2267 ipr_log_fabric_path(hostrcb, fabric);
2268 for_each_fabric_cfg(fabric, cfg)
2269 ipr_log_path_elem(hostrcb, cfg);
2270
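		/* Account for this descriptor and step to the next variable-length fabric entry */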
2271 add_len -= be16_to_cpu(fabric->length);
2272 fabric = (struct ipr_hostrcb_fabric_desc *)
2273 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2274 }
2275
2276 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2277 }
2278
2279 /**
2280 * ipr_log_sis64_array_error - Log a sis64 array error.
2281 * @ioa_cfg: ioa config struct
2282 * @hostrcb: hostrcb struct
2283 *
2284 * Return value:
2285 * none
2286 **/
2287 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2288 struct ipr_hostrcb *hostrcb)
2289 {
2290 int i, num_entries;
2291 struct ipr_hostrcb_type_24_error *error;
2292 struct ipr_hostrcb64_array_data_entry *array_entry;
2293 char buffer[IPR_MAX_RES_PATH_LENGTH];
2294 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2295
2296 error = &hostrcb->hcam.u.error64.u.type_24_error;
2297
2298 ipr_err_separator;
2299
2300 ipr_err("RAID %s Array Configuration: %s\n",
2301 error->protection_level,
2302 ipr_format_res_path(ioa_cfg, error->last_res_path,
2303 buffer, sizeof(buffer)));
2304
2305 ipr_err_separator;
2306
2307 array_entry = error->array_member;
2308 num_entries = min_t(u32, error->num_entries,
2309 ARRAY_SIZE(error->array_member));
2310
2311 for (i = 0; i < num_entries; i++, array_entry++) {
2312
2313 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2314 continue;
2315
2316 if (error->exposed_mode_adn == i)
2317 ipr_err("Exposed Array Member %d:\n", i);
2318 else
2319 ipr_err("Array Member %d:\n", i);
2320
2321 ipr_err("Array Member %d:\n", i);
2322 ipr_log_ext_vpd(&array_entry->vpd);
2323 ipr_err("Current Location: %s\n",
2324 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2325 buffer, sizeof(buffer)));
2326 ipr_err("Expected Location: %s\n",
2327 ipr_format_res_path(ioa_cfg,
2328 array_entry->expected_res_path,
2329 buffer, sizeof(buffer)));
2330
2331 ipr_err_separator;
2332 }
2333 }
2334
2335 /**
2336 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2337 * @ioa_cfg: ioa config struct
2338 * @hostrcb: hostrcb struct
2339 *
2340 * Return value:
2341 * none
2342 **/
2343 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2344 struct ipr_hostrcb *hostrcb)
2345 {
2346 struct ipr_hostrcb_type_30_error *error;
2347 struct ipr_hostrcb64_fabric_desc *fabric;
2348 struct ipr_hostrcb64_config_element *cfg;
2349 int i, add_len;
2350
2351 error = &hostrcb->hcam.u.error64.u.type_30_error;
2352
2353 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2354 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2355
2356 add_len = be32_to_cpu(hostrcb->hcam.length) -
2357 (offsetof(struct ipr_hostrcb64_error, u) +
2358 offsetof(struct ipr_hostrcb_type_30_error, desc));
2359
2360 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2361 ipr_log64_fabric_path(hostrcb, fabric);
2362 for_each_fabric_cfg(fabric, cfg)
2363 ipr_log64_path_elem(hostrcb, cfg);
2364
2365 add_len -= be16_to_cpu(fabric->length);
2366 fabric = (struct ipr_hostrcb64_fabric_desc *)
2367 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2368 }
2369
2370 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2371 }
2372
2373 /**
2374 * ipr_log_generic_error - Log an adapter error.
2375 * @ioa_cfg: ioa config struct
2376 * @hostrcb: hostrcb struct
2377 *
2378 * Return value:
2379 * none
2380 **/
2381 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2382 struct ipr_hostrcb *hostrcb)
2383 {
2384 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2385 be32_to_cpu(hostrcb->hcam.length));
2386 }
2387
2388 /**
2389 * ipr_log_sis64_device_error - Log a sis64 device error.
2390 * @ioa_cfg: ioa config struct
2391 * @hostrcb: hostrcb struct
2392 *
2393 * Return value:
2394 * none
2395 **/
2396 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2397 struct ipr_hostrcb *hostrcb)
2398 {
2399 struct ipr_hostrcb_type_21_error *error;
2400 char buffer[IPR_MAX_RES_PATH_LENGTH];
2401
2402 error = &hostrcb->hcam.u.error64.u.type_21_error;
2403
2404 ipr_err("-----Failing Device Information-----\n");
2405 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2406 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2407 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2408 ipr_err("Device Resource Path: %s\n",
2409 __ipr_format_res_path(error->res_path,
2410 buffer, sizeof(buffer)));
2411 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2412 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2413 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2414 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2415 ipr_err("SCSI Sense Data:\n");
2416 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2417 ipr_err("SCSI Command Descriptor Block: \n");
2418 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2419
2420 ipr_err("Additional IOA Data:\n");
2421 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2422 }
2423
2424 /**
2425 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2426 * @ioasc: IOASC
2427 *
2428 * This function will return the index into the ipr_error_table
2429 * for the specified IOASC. If the IOASC is not in the table,
2430 * 0 will be returned, which points to the entry used for unknown errors.
2431 *
2432 * Return value:
2433 * index into the ipr_error_table
2434 **/
2435 static u32 ipr_get_error(u32 ioasc)
2436 {
2437 int i;
2438
2439 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2440 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2441 return i;
2442
2443 return 0;
2444 }
2445
2446 /**
2447 * ipr_handle_log_data - Log an adapter error.
2448 * @ioa_cfg: ioa config struct
2449 * @hostrcb: hostrcb struct
2450 *
2451 * This function logs an adapter error to the system.
2452 *
2453 * Return value:
2454 * none
2455 **/
2456 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2457 struct ipr_hostrcb *hostrcb)
2458 {
2459 u32 ioasc;
2460 int error_index;
2461 struct ipr_hostrcb_type_21_error *error;
2462
2463 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2464 return;
2465
2466 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2467 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2468
2469 if (ioa_cfg->sis64)
2470 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2471 else
2472 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2473
2474 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2475 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2476 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2477 scsi_report_bus_reset(ioa_cfg->host,
2478 hostrcb->hcam.u.error.fd_res_addr.bus);
2479 }
2480
2481 error_index = ipr_get_error(ioasc);
2482
2483 if (!ipr_error_table[error_index].log_hcam)
2484 return;
2485
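	/* At the default log level, suppress type 21 hardware command failures
	 * whose sense data indicates ILLEGAL_REQUEST */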
2486 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2487 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2488 error = &hostrcb->hcam.u.error64.u.type_21_error;
2489
2490 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2491 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2492 return;
2493 }
2494
2495 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2496
2497 /* Set indication we have logged an error */
2498 ioa_cfg->errors_logged++;
2499
2500 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2501 return;
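	/* Clamp the reported HCAM length so the dump routines never read past the raw buffer */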
2502 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2503 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2504
2505 switch (hostrcb->hcam.overlay_id) {
2506 case IPR_HOST_RCB_OVERLAY_ID_2:
2507 ipr_log_cache_error(ioa_cfg, hostrcb);
2508 break;
2509 case IPR_HOST_RCB_OVERLAY_ID_3:
2510 ipr_log_config_error(ioa_cfg, hostrcb);
2511 break;
2512 case IPR_HOST_RCB_OVERLAY_ID_4:
2513 case IPR_HOST_RCB_OVERLAY_ID_6:
2514 ipr_log_array_error(ioa_cfg, hostrcb);
2515 break;
2516 case IPR_HOST_RCB_OVERLAY_ID_7:
2517 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2518 break;
2519 case IPR_HOST_RCB_OVERLAY_ID_12:
2520 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2521 break;
2522 case IPR_HOST_RCB_OVERLAY_ID_13:
2523 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2524 break;
2525 case IPR_HOST_RCB_OVERLAY_ID_14:
2526 case IPR_HOST_RCB_OVERLAY_ID_16:
2527 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2528 break;
2529 case IPR_HOST_RCB_OVERLAY_ID_17:
2530 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2531 break;
2532 case IPR_HOST_RCB_OVERLAY_ID_20:
2533 ipr_log_fabric_error(ioa_cfg, hostrcb);
2534 break;
2535 case IPR_HOST_RCB_OVERLAY_ID_21:
2536 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2537 break;
2538 case IPR_HOST_RCB_OVERLAY_ID_23:
2539 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2540 break;
2541 case IPR_HOST_RCB_OVERLAY_ID_24:
2542 case IPR_HOST_RCB_OVERLAY_ID_26:
2543 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2544 break;
2545 case IPR_HOST_RCB_OVERLAY_ID_30:
2546 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2547 break;
2548 case IPR_HOST_RCB_OVERLAY_ID_1:
2549 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2550 default:
2551 ipr_log_generic_error(ioa_cfg, hostrcb);
2552 break;
2553 }
2554 }
2555
2556 /**
2557 * ipr_process_error - Op done function for an adapter error log.
2558 * @ipr_cmd: ipr command struct
2559 *
2560 * This function is the op done function for an error log host
2561 * controlled async message (HCAM) from the adapter. It will log the error and
2562 * send the HCAM back to the adapter.
2563 *
2564 * Return value:
2565 * none
2566 **/
2567 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2568 {
2569 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2570 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2571 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2572 u32 fd_ioasc;
2573
2574 if (ioa_cfg->sis64)
2575 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2576 else
2577 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2578
2579 list_del(&hostrcb->queue);
2580 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2581
2582 if (!ioasc) {
2583 ipr_handle_log_data(ioa_cfg, hostrcb);
2584 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2585 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2586 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2587 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2588 dev_err(&ioa_cfg->pdev->dev,
2589 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2590 }
2591
2592 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2593 }
2594
2595 /**
2596 * ipr_timeout - An internally generated op has timed out.
2597 * @ipr_cmd: ipr command struct
2598 *
2599 * This function blocks host requests and initiates an
2600 * adapter reset.
2601 *
2602 * Return value:
2603 * none
2604 **/
2605 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2606 {
2607 unsigned long lock_flags = 0;
2608 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2609
2610 ENTER;
2611 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2612
2613 ioa_cfg->errors_logged++;
2614 dev_err(&ioa_cfg->pdev->dev,
2615 "Adapter being reset due to command timeout.\n");
2616
2617 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2618 ioa_cfg->sdt_state = GET_DUMP;
2619
2620 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2621 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2622
2623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2624 LEAVE;
2625 }
2626
2627 /**
2628 * ipr_oper_timeout - Adapter timed out transitioning to operational
2629 * @ipr_cmd: ipr command struct
2630 *
2631 * This function blocks host requests and initiates an
2632 * adapter reset.
2633 *
2634 * Return value:
2635 * none
2636 **/
2637 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2638 {
2639 unsigned long lock_flags = 0;
2640 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2641
2642 ENTER;
2643 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2644
2645 ioa_cfg->errors_logged++;
2646 dev_err(&ioa_cfg->pdev->dev,
2647 "Adapter timed out transitioning to operational.\n");
2648
2649 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2650 ioa_cfg->sdt_state = GET_DUMP;
2651
2652 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2653 if (ipr_fastfail)
2654 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2655 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2656 }
2657
2658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2659 LEAVE;
2660 }
2661
2662 /**
2663 * ipr_find_ses_entry - Find matching SES in SES table
2664 * @res: resource entry struct of SES
2665 *
2666 * Return value:
2667 * pointer to SES table entry / NULL on failure
2668 **/
2669 static const struct ipr_ses_table_entry *
2670 ipr_find_ses_entry(struct ipr_resource_entry *res)
2671 {
2672 int i, j, matches;
2673 struct ipr_std_inq_vpids *vpids;
2674 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2675
2676 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2677 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2678 if (ste->compare_product_id_byte[j] == 'X') {
2679 vpids = &res->std_inq_data.vpids;
2680 if (vpids->product_id[j] == ste->product_id[j])
2681 matches++;
2682 else
2683 break;
2684 } else
2685 matches++;
2686 }
2687
2688 if (matches == IPR_PROD_ID_LEN)
2689 return ste;
2690 }
2691
2692 return NULL;
2693 }
2694
2695 /**
2696 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2697 * @ioa_cfg: ioa config struct
2698 * @bus: SCSI bus
2699 * @bus_width: bus width
2700 *
2701 * Return value:
2702 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2703 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2704 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2705 * max 160MHz = max 320MB/sec).
2706 **/
2707 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2708 {
2709 struct ipr_resource_entry *res;
2710 const struct ipr_ses_table_entry *ste;
2711 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2712
2713 /* Loop through each config table entry in the config table buffer */
2714 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2715 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2716 continue;
2717
2718 if (bus != res->bus)
2719 continue;
2720
2721 if (!(ste = ipr_find_ses_entry(res)))
2722 continue;
2723
2724 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2725 }
2726
2727 return max_xfer_rate;
2728 }
2729
2730 /**
2731 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2732 * @ioa_cfg: ioa config struct
2733 * @max_delay: max delay in micro-seconds to wait
2734 *
2735 * Waits for an IODEBUG ACK from the IOA by busy-waiting on the interrupt register.
2736 *
2737 * Return value:
2738 * 0 on success / other on failure
2739 **/
2740 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2741 {
2742 volatile u32 pcii_reg;
2743 int delay = 1;
2744
2745 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2746 while (delay < max_delay) {
2747 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2748
2749 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2750 return 0;
2751
2752 /* udelay cannot be used if delay is more than a few milliseconds */
2753 if ((delay / 1000) > MAX_UDELAY_MS)
2754 mdelay(delay / 1000);
2755 else
2756 udelay(delay);
2757
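		/* Exponential backoff: double the delay between polls */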
2758 delay += delay;
2759 }
2760 return -EIO;
2761 }
2762
2763 /**
2764 * ipr_get_sis64_dump_data_section - Dump IOA memory
2765 * @ioa_cfg: ioa config struct
2766 * @start_addr: adapter address to dump
2767 * @dest: destination kernel buffer
2768 * @length_in_words: length to dump in 4 byte words
2769 *
2770 * Return value:
2771 * 0 on success
2772 **/
2773 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2774 u32 start_addr,
2775 __be32 *dest, u32 length_in_words)
2776 {
2777 int i;
2778
2779 for (i = 0; i < length_in_words; i++) {
2780 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2781 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2782 dest++;
2783 }
2784
2785 return 0;
2786 }
2787
2788 /**
2789 * ipr_get_ldump_data_section - Dump IOA memory
2790 * @ioa_cfg: ioa config struct
2791 * @start_addr: adapter address to dump
2792 * @dest: destination kernel buffer
2793 * @length_in_words: length to dump in 4 byte words
2794 *
2795 * Return value:
2796 * 0 on success / -EIO on failure
2797 **/
2798 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2799 u32 start_addr,
2800 __be32 *dest, u32 length_in_words)
2801 {
2802 volatile u32 temp_pcii_reg;
2803 int i, delay = 0;
2804
2805 if (ioa_cfg->sis64)
2806 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2807 dest, length_in_words);
2808
2809 /* Write IOA interrupt reg starting LDUMP state */
2810 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2811 ioa_cfg->regs.set_uproc_interrupt_reg32);
2812
2813 /* Wait for IO debug acknowledge */
2814 if (ipr_wait_iodbg_ack(ioa_cfg,
2815 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2816 dev_err(&ioa_cfg->pdev->dev,
2817 "IOA dump long data transfer timeout\n");
2818 return -EIO;
2819 }
2820
2821 /* Signal LDUMP interlocked - clear IO debug ack */
2822 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2823 ioa_cfg->regs.clr_interrupt_reg);
2824
2825 /* Write Mailbox with starting address */
2826 writel(start_addr, ioa_cfg->ioa_mailbox);
2827
2828 /* Signal address valid - clear IOA Reset alert */
2829 writel(IPR_UPROCI_RESET_ALERT,
2830 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2831
2832 for (i = 0; i < length_in_words; i++) {
2833 /* Wait for IO debug acknowledge */
2834 if (ipr_wait_iodbg_ack(ioa_cfg,
2835 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2836 dev_err(&ioa_cfg->pdev->dev,
2837 "IOA dump short data transfer timeout\n");
2838 return -EIO;
2839 }
2840
2841 /* Read data from mailbox and increment destination pointer */
2842 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2843 dest++;
2844
2845 /* For all but the last word of data, signal data received */
2846 if (i < (length_in_words - 1)) {
2847 /* Signal dump data received - Clear IO debug Ack */
2848 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2849 ioa_cfg->regs.clr_interrupt_reg);
2850 }
2851 }
2852
2853 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2854 writel(IPR_UPROCI_RESET_ALERT,
2855 ioa_cfg->regs.set_uproc_interrupt_reg32);
2856
2857 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2858 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2859
2860 /* Signal dump data received - Clear IO debug Ack */
2861 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2862 ioa_cfg->regs.clr_interrupt_reg);
2863
2864 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2865 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2866 temp_pcii_reg =
2867 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2868
2869 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2870 return 0;
2871
2872 udelay(10);
2873 delay += 10;
2874 }
2875
2876 return 0;
2877 }
2878
2879 #ifdef CONFIG_SCSI_IPR_DUMP
2880 /**
2881 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2882 * @ioa_cfg: ioa config struct
2883 * @pci_address: adapter address
2884 * @length: length of data to copy
2885 *
2886 * Copy data from PCI adapter to kernel buffer.
2887 * Note: length MUST be a 4 byte multiple
2888 * Return value:
2889 * 0 on success / other on failure
2890 **/
2891 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2892 unsigned long pci_address, u32 length)
2893 {
2894 int bytes_copied = 0;
2895 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2896 __be32 *page;
2897 unsigned long lock_flags = 0;
2898 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2899
2900 if (ioa_cfg->sis64)
2901 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2902 else
2903 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2904
2905 while (bytes_copied < length &&
2906 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
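		/* Grab a fresh page whenever the current one is full (or on the first iteration) */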
2907 if (ioa_dump->page_offset >= PAGE_SIZE ||
2908 ioa_dump->page_offset == 0) {
2909 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2910
2911 if (!page) {
2912 ipr_trace;
2913 return bytes_copied;
2914 }
2915
2916 ioa_dump->page_offset = 0;
2917 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2918 ioa_dump->next_page_index++;
2919 } else
2920 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2921
2922 rem_len = length - bytes_copied;
2923 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2924 cur_len = min(rem_len, rem_page_len);
2925
2926 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2927 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2928 rc = -EIO;
2929 } else {
2930 rc = ipr_get_ldump_data_section(ioa_cfg,
2931 pci_address + bytes_copied,
2932 &page[ioa_dump->page_offset / 4],
2933 (cur_len / sizeof(u32)));
2934 }
2935 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2936
2937 if (!rc) {
2938 ioa_dump->page_offset += cur_len;
2939 bytes_copied += cur_len;
2940 } else {
2941 ipr_trace;
2942 break;
2943 }
2944 schedule();
2945 }
2946
2947 return bytes_copied;
2948 }
2949
2950 /**
2951 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2952 * @hdr: dump entry header struct
2953 *
2954 * Return value:
2955 * nothing
2956 **/
2957 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2958 {
2959 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2960 hdr->num_elems = 1;
2961 hdr->offset = sizeof(*hdr);
2962 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2963 }
2964
2965 /**
2966 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2967 * @ioa_cfg: ioa config struct
2968 * @driver_dump: driver dump struct
2969 *
2970 * Return value:
2971 * nothing
2972 **/
2973 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2974 struct ipr_driver_dump *driver_dump)
2975 {
2976 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2977
2978 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2979 driver_dump->ioa_type_entry.hdr.len =
2980 sizeof(struct ipr_dump_ioa_type_entry) -
2981 sizeof(struct ipr_dump_entry_header);
2982 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2983 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2984 driver_dump->ioa_type_entry.type = ioa_cfg->type;
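	/* Firmware version packs, high byte to low: major release, card type,
	 * and the two minor release bytes */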
2985 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2986 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2987 ucode_vpd->minor_release[1];
2988 driver_dump->hdr.num_entries++;
2989 }
2990
2991 /**
2992 * ipr_dump_version_data - Fill in the driver version in the dump.
2993 * @ioa_cfg: ioa config struct
2994 * @driver_dump: driver dump struct
2995 *
2996 * Return value:
2997 * nothing
2998 **/
2999 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3000 struct ipr_driver_dump *driver_dump)
3001 {
3002 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3003 driver_dump->version_entry.hdr.len =
3004 sizeof(struct ipr_dump_version_entry) -
3005 sizeof(struct ipr_dump_entry_header);
3006 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3007 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3008 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3009 driver_dump->hdr.num_entries++;
3010 }
3011
3012 /**
3013 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3014 * @ioa_cfg: ioa config struct
3015 * @driver_dump: driver dump struct
3016 *
3017 * Return value:
3018 * nothing
3019 **/
3020 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3021 struct ipr_driver_dump *driver_dump)
3022 {
3023 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3024 driver_dump->trace_entry.hdr.len =
3025 sizeof(struct ipr_dump_trace_entry) -
3026 sizeof(struct ipr_dump_entry_header);
3027 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3028 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3029 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3030 driver_dump->hdr.num_entries++;
3031 }
3032
3033 /**
3034 * ipr_dump_location_data - Fill in the IOA location in the dump.
3035 * @ioa_cfg: ioa config struct
3036 * @driver_dump: driver dump struct
3037 *
3038 * Return value:
3039 * nothing
3040 **/
3041 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3042 struct ipr_driver_dump *driver_dump)
3043 {
3044 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3045 driver_dump->location_entry.hdr.len =
3046 sizeof(struct ipr_dump_location_entry) -
3047 sizeof(struct ipr_dump_entry_header);
3048 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3049 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3050 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3051 driver_dump->hdr.num_entries++;
3052 }
3053
3054 /**
3055 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3056 * @ioa_cfg: ioa config struct
3057 * @dump: dump struct
3058 *
3059 * Return value:
3060 * nothing
3061 **/
3062 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3063 {
3064 unsigned long start_addr, sdt_word;
3065 unsigned long lock_flags = 0;
3066 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3067 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3068 u32 num_entries, max_num_entries, start_off, end_off;
3069 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3070 struct ipr_sdt *sdt;
3071 int valid = 1;
3072 int i;
3073
3074 ENTER;
3075
3076 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3077
3078 if (ioa_cfg->sdt_state != READ_DUMP) {
3079 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3080 return;
3081 }
3082
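	/* sis64: drop the host lock and delay before reading the mailbox,
	 * presumably to give the IOA time to prepare its dump data */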
3083 if (ioa_cfg->sis64) {
3084 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3085 ssleep(IPR_DUMP_DELAY_SECONDS);
3086 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3087 }
3088
3089 start_addr = readl(ioa_cfg->ioa_mailbox);
3090
3091 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3092 dev_err(&ioa_cfg->pdev->dev,
3093 "Invalid dump table format: %lx\n", start_addr);
3094 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3095 return;
3096 }
3097
3098 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3099
3100 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3101
3102 /* Initialize the overall dump header */
3103 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3104 driver_dump->hdr.num_entries = 1;
3105 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3106 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3107 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3108 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3109
3110 ipr_dump_version_data(ioa_cfg, driver_dump);
3111 ipr_dump_location_data(ioa_cfg, driver_dump);
3112 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3113 ipr_dump_trace_data(ioa_cfg, driver_dump);
3114
3115 /* Update dump_header */
3116 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3117
3118 /* IOA Dump entry */
3119 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3120 ioa_dump->hdr.len = 0;
3121 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3122 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3123
3124 /* First entries in sdt are actually a list of dump addresses and
3125 lengths to gather the real dump data. sdt represents the pointer
3126 to the ioa generated dump table. Dump data will be extracted based
3127 on entries in this table */
3128 sdt = &ioa_dump->sdt;
3129
3130 if (ioa_cfg->sis64) {
3131 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3132 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3133 } else {
3134 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3135 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3136 }
3137
3138 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3139 (max_num_entries * sizeof(struct ipr_sdt_entry));
3140 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3141 bytes_to_copy / sizeof(__be32));
3142
3143 /* Smart Dump table is ready to use and the first entry is valid */
3144 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3145 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3146 dev_err(&ioa_cfg->pdev->dev,
3147 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3148 rc, be32_to_cpu(sdt->hdr.state));
3149 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3150 ioa_cfg->sdt_state = DUMP_OBTAINED;
3151 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3152 return;
3153 }
3154
3155 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3156
3157 if (num_entries > max_num_entries)
3158 num_entries = max_num_entries;
3159
3160 /* Update dump length to the actual data to be copied */
3161 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3162 if (ioa_cfg->sis64)
3163 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3164 else
3165 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3166
3167 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3168
3169 for (i = 0; i < num_entries; i++) {
3170 if (ioa_dump->hdr.len > max_dump_size) {
3171 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3172 break;
3173 }
3174
3175 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3176 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3177 if (ioa_cfg->sis64)
3178 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3179 else {
3180 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3181 end_off = be32_to_cpu(sdt->entry[i].end_token);
3182
3183 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3184 bytes_to_copy = end_off - start_off;
3185 else
3186 valid = 0;
3187 }
3188 if (valid) {
3189 if (bytes_to_copy > max_dump_size) {
3190 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3191 continue;
3192 }
3193
3194 /* Copy data from adapter to driver buffers */
3195 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3196 bytes_to_copy);
3197
3198 ioa_dump->hdr.len += bytes_copied;
3199
3200 if (bytes_copied != bytes_to_copy) {
3201 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3202 break;
3203 }
3204 }
3205 }
3206 }
3207
3208 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3209
3210 /* Update dump_header */
3211 driver_dump->hdr.len += ioa_dump->hdr.len;
3212 wmb();
3213 ioa_cfg->sdt_state = DUMP_OBTAINED;
3214 LEAVE;
3215 }
3216
3217 #else
3218 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3219 #endif
3220
3221 /**
3222 * ipr_release_dump - Free adapter dump memory
3223 * @kref: kref struct
3224 *
3225 * Return value:
3226 * nothing
3227 **/
3228 static void ipr_release_dump(struct kref *kref)
3229 {
3230 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3231 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3232 unsigned long lock_flags = 0;
3233 int i;
3234
3235 ENTER;
3236 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3237 ioa_cfg->dump = NULL;
3238 ioa_cfg->sdt_state = INACTIVE;
3239 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3240
3241 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3242 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3243
3244 vfree(dump->ioa_dump.ioa_data);
3245 kfree(dump);
3246 LEAVE;
3247 }
3248
3249 /**
3250 * ipr_worker_thread - Worker thread
3251 * @work: ioa config struct
3252 *
3253 * Called at task level from a work thread. This function takes care
3254 * of adding and removing devices from the mid-layer as configuration
3255 * changes are detected by the adapter.
3256 *
3257 * Return value:
3258 * nothing
3259 **/
3260 static void ipr_worker_thread(struct work_struct *work)
3261 {
3262 unsigned long lock_flags;
3263 struct ipr_resource_entry *res;
3264 struct scsi_device *sdev;
3265 struct ipr_dump *dump;
3266 struct ipr_ioa_cfg *ioa_cfg =
3267 container_of(work, struct ipr_ioa_cfg, work_q);
3268 u8 bus, target, lun;
3269 int did_work;
3270
3271 ENTER;
3272 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3273
3274 if (ioa_cfg->sdt_state == READ_DUMP) {
3275 dump = ioa_cfg->dump;
3276 if (!dump) {
3277 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3278 return;
3279 }
3280 kref_get(&dump->kref);
3281 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3282 ipr_get_ioa_dump(ioa_cfg, dump);
3283 kref_put(&dump->kref, ipr_release_dump);
3284
3285 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3286 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3287 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3289 return;
3290 }
3291
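	/*
	 * scsi_remove_device()/scsi_add_device() may sleep, so the host lock
	 * is dropped around each call and the scan is restarted once the lock
	 * is reacquired, since the resource list may have changed meanwhile.
	 */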
3292 restart:
3293 do {
3294 did_work = 0;
3295 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3296 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3297 return;
3298 }
3299
3300 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3301 if (res->del_from_ml && res->sdev) {
3302 did_work = 1;
3303 sdev = res->sdev;
3304 if (!scsi_device_get(sdev)) {
3305 if (!res->add_to_ml)
3306 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3307 else
3308 res->del_from_ml = 0;
3309 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3310 scsi_remove_device(sdev);
3311 scsi_device_put(sdev);
3312 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3313 }
3314 break;
3315 }
3316 }
3317 } while (did_work);
3318
3319 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3320 if (res->add_to_ml) {
3321 bus = res->bus;
3322 target = res->target;
3323 lun = res->lun;
3324 res->add_to_ml = 0;
3325 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3326 scsi_add_device(ioa_cfg->host, bus, target, lun);
3327 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3328 goto restart;
3329 }
3330 }
3331
3332 ioa_cfg->scan_done = 1;
3333 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3334 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3335 LEAVE;
3336 }
3337
3338 #ifdef CONFIG_SCSI_IPR_TRACE
3339 /**
3340 * ipr_read_trace - Dump the adapter trace
3341 * @filp: open sysfs file
3342 * @kobj: kobject struct
3343 * @bin_attr: bin_attribute struct
3344 * @buf: buffer
3345 * @off: offset
3346 * @count: buffer size
3347 *
3348 * Return value:
3349 * number of bytes printed to buffer
3350 **/
3351 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3352 struct bin_attribute *bin_attr,
3353 char *buf, loff_t off, size_t count)
3354 {
3355 struct device *dev = container_of(kobj, struct device, kobj);
3356 struct Scsi_Host *shost = class_to_shost(dev);
3357 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3358 unsigned long lock_flags = 0;
3359 ssize_t ret;
3360
3361 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3362 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3363 IPR_TRACE_SIZE);
3364 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3365
3366 return ret;
3367 }
3368
3369 static struct bin_attribute ipr_trace_attr = {
3370 .attr = {
3371 .name = "trace",
3372 .mode = S_IRUGO,
3373 },
3374 .size = 0,
3375 .read = ipr_read_trace,
3376 };
3377 #endif
3378
3379 /**
3380 * ipr_show_fw_version - Show the firmware version
3381 * @dev: class device struct
3382 * @buf: buffer
3383 *
3384 * Return value:
3385 * number of bytes printed to buffer
3386 **/
3387 static ssize_t ipr_show_fw_version(struct device *dev,
3388 struct device_attribute *attr, char *buf)
3389 {
3390 struct Scsi_Host *shost = class_to_shost(dev);
3391 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3392 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3393 unsigned long lock_flags = 0;
3394 int len;
3395
3396 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3397 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3398 ucode_vpd->major_release, ucode_vpd->card_type,
3399 ucode_vpd->minor_release[0],
3400 ucode_vpd->minor_release[1]);
3401 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3402 return len;
3403 }
3404
3405 static struct device_attribute ipr_fw_version_attr = {
3406 .attr = {
3407 .name = "fw_version",
3408 .mode = S_IRUGO,
3409 },
3410 .show = ipr_show_fw_version,
3411 };
3412
3413 /**
3414 * ipr_show_log_level - Show the adapter's error logging level
3415 * @dev: class device struct
3416 * @buf: buffer
3417 *
3418 * Return value:
3419 * number of bytes printed to buffer
3420 **/
3421 static ssize_t ipr_show_log_level(struct device *dev,
3422 struct device_attribute *attr, char *buf)
3423 {
3424 struct Scsi_Host *shost = class_to_shost(dev);
3425 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3426 unsigned long lock_flags = 0;
3427 int len;
3428
3429 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3430 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3432 return len;
3433 }
3434
3435 /**
3436 * ipr_store_log_level - Change the adapter's error logging level
3437 * @dev: class device struct
3438 * @buf: buffer
3439 *
3440 * Return value:
3441  * 	number of bytes consumed from buffer
3442 **/
3443 static ssize_t ipr_store_log_level(struct device *dev,
3444 struct device_attribute *attr,
3445 const char *buf, size_t count)
3446 {
3447 struct Scsi_Host *shost = class_to_shost(dev);
3448 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3449 unsigned long lock_flags = 0;
3450
3451 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3452 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3454 return strlen(buf);
3455 }
3456
3457 static struct device_attribute ipr_log_level_attr = {
3458 .attr = {
3459 .name = "log_level",
3460 .mode = S_IRUGO | S_IWUSR,
3461 },
3462 .show = ipr_show_log_level,
3463 .store = ipr_store_log_level
3464 };
3465
3466 /**
3467 * ipr_store_diagnostics - IOA Diagnostics interface
3468 * @dev: device struct
3469 * @buf: buffer
3470 * @count: buffer size
3471 *
3472 * This function will reset the adapter and wait a reasonable
3473 * amount of time for any errors that the adapter might log.
3474 *
3475 * Return value:
3476 * count on success / other on failure
3477 **/
3478 static ssize_t ipr_store_diagnostics(struct device *dev,
3479 struct device_attribute *attr,
3480 const char *buf, size_t count)
3481 {
3482 struct Scsi_Host *shost = class_to_shost(dev);
3483 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3484 unsigned long lock_flags = 0;
3485 int rc = count;
3486
3487 if (!capable(CAP_SYS_ADMIN))
3488 return -EACCES;
3489
3490 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3491 while (ioa_cfg->in_reset_reload) {
3492 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3493 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3494 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3495 }
3496
3497 ioa_cfg->errors_logged = 0;
3498 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3499
3500 if (ioa_cfg->in_reset_reload) {
3501 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3502 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3503
3504 /* Wait for a second for any errors to be logged */
3505 msleep(1000);
3506 } else {
3507 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3508 return -EIO;
3509 }
3510
3511 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3512 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3513 rc = -EIO;
3514 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3515
3516 return rc;
3517 }
3518
3519 static struct device_attribute ipr_diagnostics_attr = {
3520 .attr = {
3521 .name = "run_diagnostics",
3522 .mode = S_IWUSR,
3523 },
3524 .store = ipr_store_diagnostics
3525 };
3526
3527 /**
3528 * ipr_show_adapter_state - Show the adapter's state
3529  * @dev:	device struct
3530 * @buf: buffer
3531 *
3532 * Return value:
3533 * number of bytes printed to buffer
3534 **/
3535 static ssize_t ipr_show_adapter_state(struct device *dev,
3536 struct device_attribute *attr, char *buf)
3537 {
3538 struct Scsi_Host *shost = class_to_shost(dev);
3539 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3540 unsigned long lock_flags = 0;
3541 int len;
3542
3543 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3544 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3545 len = snprintf(buf, PAGE_SIZE, "offline\n");
3546 else
3547 len = snprintf(buf, PAGE_SIZE, "online\n");
3548 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3549 return len;
3550 }
3551
3552 /**
3553 * ipr_store_adapter_state - Change adapter state
3554 * @dev: device struct
3555 * @buf: buffer
3556 * @count: buffer size
3557 *
3558 * This function will change the adapter's state.
3559 *
3560 * Return value:
3561 * count on success / other on failure
3562 **/
3563 static ssize_t ipr_store_adapter_state(struct device *dev,
3564 struct device_attribute *attr,
3565 const char *buf, size_t count)
3566 {
3567 struct Scsi_Host *shost = class_to_shost(dev);
3568 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3569 unsigned long lock_flags;
3570 int result = count, i;
3571
3572 if (!capable(CAP_SYS_ADMIN))
3573 return -EACCES;
3574
3575 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3576 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3577 !strncmp(buf, "online", 6)) {
3578 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3579 spin_lock(&ioa_cfg->hrrq[i]._lock);
3580 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3581 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3582 }
3583 wmb();
3584 ioa_cfg->reset_retries = 0;
3585 ioa_cfg->in_ioa_bringdown = 0;
3586 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3587 }
3588 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3589 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3590
3591 return result;
3592 }
3593
3594 static struct device_attribute ipr_ioa_state_attr = {
3595 .attr = {
3596 .name = "online_state",
3597 .mode = S_IRUGO | S_IWUSR,
3598 },
3599 .show = ipr_show_adapter_state,
3600 .store = ipr_store_adapter_state
3601 };
3602
3603 /**
3604 * ipr_store_reset_adapter - Reset the adapter
3605 * @dev: device struct
3606 * @buf: buffer
3607 * @count: buffer size
3608 *
3609 * This function will reset the adapter.
3610 *
3611 * Return value:
3612 * count on success / other on failure
3613 **/
3614 static ssize_t ipr_store_reset_adapter(struct device *dev,
3615 struct device_attribute *attr,
3616 const char *buf, size_t count)
3617 {
3618 struct Scsi_Host *shost = class_to_shost(dev);
3619 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3620 unsigned long lock_flags;
3621 int result = count;
3622
3623 if (!capable(CAP_SYS_ADMIN))
3624 return -EACCES;
3625
3626 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3627 if (!ioa_cfg->in_reset_reload)
3628 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3629 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3630 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3631
3632 return result;
3633 }
3634
3635 static struct device_attribute ipr_ioa_reset_attr = {
3636 .attr = {
3637 .name = "reset_host",
3638 .mode = S_IWUSR,
3639 },
3640 .store = ipr_store_reset_adapter
3641 };
3642
3643 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3644 /**
3645 * ipr_show_iopoll_weight - Show ipr polling mode
3646 * @dev: class device struct
3647 * @buf: buffer
3648 *
3649 * Return value:
3650 * number of bytes printed to buffer
3651 **/
3652 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3653 struct device_attribute *attr, char *buf)
3654 {
3655 struct Scsi_Host *shost = class_to_shost(dev);
3656 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3657 unsigned long lock_flags = 0;
3658 int len;
3659
3660 spin_lock_irqsave(shost->host_lock, lock_flags);
3661 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3662 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3663
3664 return len;
3665 }
3666
3667 /**
3668 * ipr_store_iopoll_weight - Change the adapter's polling mode
3669 * @dev: class device struct
3670 * @buf: buffer
3671 *
3672 * Return value:
3673  * 	number of bytes consumed from buffer on success / -EINVAL on failure
3674 **/
3675 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3676 struct device_attribute *attr,
3677 const char *buf, size_t count)
3678 {
3679 struct Scsi_Host *shost = class_to_shost(dev);
3680 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3681 unsigned long user_iopoll_weight;
3682 unsigned long lock_flags = 0;
3683 int i;
3684
3685 if (!ioa_cfg->sis64) {
3686 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3687 return -EINVAL;
3688 }
3689 if (kstrtoul(buf, 10, &user_iopoll_weight))
3690 return -EINVAL;
3691
3692 if (user_iopoll_weight > 256) {
3693 		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3694 return -EINVAL;
3695 }
3696
3697 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3698 		dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight is unchanged\n");
3699 return strlen(buf);
3700 }
3701
3702 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3703 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3704 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3705 }
3706
3707 spin_lock_irqsave(shost->host_lock, lock_flags);
3708 ioa_cfg->iopoll_weight = user_iopoll_weight;
3709 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3710 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3711 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3712 ioa_cfg->iopoll_weight, ipr_iopoll);
3713 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3714 }
3715 }
3716 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3717
3718 return strlen(buf);
3719 }
3720
3721 static struct device_attribute ipr_iopoll_weight_attr = {
3722 .attr = {
3723 .name = "iopoll_weight",
3724 .mode = S_IRUGO | S_IWUSR,
3725 },
3726 .show = ipr_show_iopoll_weight,
3727 .store = ipr_store_iopoll_weight
3728 };
3729
3730 /**
3731 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3732 * @buf_len: buffer length
3733 *
3734 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3735 * list to use for microcode download
3736 *
3737 * Return value:
3738 * pointer to sglist / NULL on failure
3739 **/
3740 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3741 {
3742 int sg_size, order, bsize_elem, num_elem, i, j;
3743 struct ipr_sglist *sglist;
3744 struct scatterlist *scatterlist;
3745 struct page *page;
3746
3747 /* Get the minimum size per scatter/gather element */
3748 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3749
3750 /* Get the actual size per element */
3751 order = get_order(sg_size);
3752
3753 /* Determine the actual number of bytes per element */
3754 bsize_elem = PAGE_SIZE * (1 << order);
3755
3756 /* Determine the actual number of sg entries needed */
3757 if (buf_len % bsize_elem)
3758 num_elem = (buf_len / bsize_elem) + 1;
3759 else
3760 num_elem = buf_len / bsize_elem;
3761
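	/*
	 * struct ipr_sglist is assumed to be declared with a single trailing
	 * scatterlist element, so only (num_elem - 1) additional entries are
	 * added to the allocation below.
	 */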
3762 /* Allocate a scatter/gather list for the DMA */
3763 sglist = kzalloc(sizeof(struct ipr_sglist) +
3764 (sizeof(struct scatterlist) * (num_elem - 1)),
3765 GFP_KERNEL);
3766
3767 if (sglist == NULL) {
3768 ipr_trace;
3769 return NULL;
3770 }
3771
3772 scatterlist = sglist->scatterlist;
3773 sg_init_table(scatterlist, num_elem);
3774
3775 sglist->order = order;
3776 sglist->num_sg = num_elem;
3777
3778 /* Allocate a bunch of sg elements */
3779 for (i = 0; i < num_elem; i++) {
3780 page = alloc_pages(GFP_KERNEL, order);
3781 if (!page) {
3782 ipr_trace;
3783
3784 /* Free up what we already allocated */
3785 for (j = i - 1; j >= 0; j--)
3786 __free_pages(sg_page(&scatterlist[j]), order);
3787 kfree(sglist);
3788 return NULL;
3789 }
3790
3791 sg_set_page(&scatterlist[i], page, 0, 0);
3792 }
3793
3794 return sglist;
3795 }
3796
3797 /**
3798 * ipr_free_ucode_buffer - Frees a microcode download buffer
3799 * @p_dnld: scatter/gather list pointer
3800 *
3801 * Free a DMA'able ucode download buffer previously allocated with
3802 * ipr_alloc_ucode_buffer
3803 *
3804 * Return value:
3805 * nothing
3806 **/
3807 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3808 {
3809 int i;
3810
3811 for (i = 0; i < sglist->num_sg; i++)
3812 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3813
3814 kfree(sglist);
3815 }
3816
3817 /**
3818 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3819 * @sglist: scatter/gather list pointer
3820 * @buffer: buffer pointer
3821 * @len: buffer length
3822 *
3823 * Copy a microcode image from a user buffer into a buffer allocated by
3824 * ipr_alloc_ucode_buffer
3825 *
3826 * Return value:
3827 * 0 on success / other on failure
3828 **/
3829 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3830 u8 *buffer, u32 len)
3831 {
3832 int bsize_elem, i, result = 0;
3833 struct scatterlist *scatterlist;
3834 void *kaddr;
3835
3836 /* Determine the actual number of bytes per element */
3837 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3838
3839 scatterlist = sglist->scatterlist;
3840
3841 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3842 struct page *page = sg_page(&scatterlist[i]);
3843
3844 kaddr = kmap(page);
3845 memcpy(kaddr, buffer, bsize_elem);
3846 kunmap(page);
3847
3848 scatterlist[i].length = bsize_elem;
3849
3850 if (result != 0) {
3851 ipr_trace;
3852 return result;
3853 }
3854 }
3855
3856 if (len % bsize_elem) {
3857 struct page *page = sg_page(&scatterlist[i]);
3858
3859 kaddr = kmap(page);
3860 memcpy(kaddr, buffer, len % bsize_elem);
3861 kunmap(page);
3862
3863 scatterlist[i].length = len % bsize_elem;
3864 }
3865
3866 sglist->buffer_len = len;
3867 return result;
3868 }
3869
3870 /**
3871 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3872 * @ipr_cmd: ipr command struct
3873 * @sglist: scatter/gather list
3874 *
3875 * Builds a microcode download IOA data list (IOADL).
3876 *
3877 **/
3878 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3879 struct ipr_sglist *sglist)
3880 {
3881 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3882 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3883 struct scatterlist *scatterlist = sglist->scatterlist;
3884 int i;
3885
3886 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3887 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3888 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3889
3890 ioarcb->ioadl_len =
3891 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3892 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3893 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3894 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3895 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3896 }
3897
3898 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3899 }
3900
3901 /**
3902 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3903 * @ipr_cmd: ipr command struct
3904 * @sglist: scatter/gather list
3905 *
3906 * Builds a microcode download IOA data list (IOADL).
3907 *
3908 **/
3909 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3910 struct ipr_sglist *sglist)
3911 {
3912 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3913 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3914 struct scatterlist *scatterlist = sglist->scatterlist;
3915 int i;
3916
3917 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3918 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3919 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3920
3921 ioarcb->ioadl_len =
3922 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3923
3924 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3925 ioadl[i].flags_and_data_len =
3926 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3927 ioadl[i].address =
3928 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3929 }
3930
3931 ioadl[i-1].flags_and_data_len |=
3932 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3933 }
3934
3935 /**
3936 * ipr_update_ioa_ucode - Update IOA's microcode
3937 * @ioa_cfg: ioa config struct
3938 * @sglist: scatter/gather list
3939 *
3940 * Initiate an adapter reset to update the IOA's microcode
3941 *
3942 * Return value:
3943 * 0 on success / -EIO on failure
3944 **/
3945 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3946 struct ipr_sglist *sglist)
3947 {
3948 unsigned long lock_flags;
3949
3950 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3951 while (ioa_cfg->in_reset_reload) {
3952 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3953 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3954 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3955 }
3956
3957 if (ioa_cfg->ucode_sglist) {
3958 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3959 dev_err(&ioa_cfg->pdev->dev,
3960 "Microcode download already in progress\n");
3961 return -EIO;
3962 }
3963
3964 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3965 sglist->scatterlist, sglist->num_sg,
3966 DMA_TO_DEVICE);
3967
3968 if (!sglist->num_dma_sg) {
3969 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3970 dev_err(&ioa_cfg->pdev->dev,
3971 "Failed to map microcode download buffer!\n");
3972 return -EIO;
3973 }
3974
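	/*
	 * The adapter reset job consumes ucode_sglist; the actual microcode
	 * download to the IOA happens as part of the reset sequence initiated
	 * below.
	 */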
3975 ioa_cfg->ucode_sglist = sglist;
3976 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3977 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3978 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3979
3980 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3981 ioa_cfg->ucode_sglist = NULL;
3982 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3983 return 0;
3984 }
3985
3986 /**
3987 * ipr_store_update_fw - Update the firmware on the adapter
3988  * @dev:	device struct
3989 * @buf: buffer
3990 * @count: buffer size
3991 *
3992 * This function will update the firmware on the adapter.
3993 *
3994 * Return value:
3995 * count on success / other on failure
3996 **/
3997 static ssize_t ipr_store_update_fw(struct device *dev,
3998 struct device_attribute *attr,
3999 const char *buf, size_t count)
4000 {
4001 struct Scsi_Host *shost = class_to_shost(dev);
4002 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4003 struct ipr_ucode_image_header *image_hdr;
4004 const struct firmware *fw_entry;
4005 struct ipr_sglist *sglist;
4006 char fname[100];
4007 char *src;
4008 char *endline;
4009 int result, dnld_size;
4010
4011 if (!capable(CAP_SYS_ADMIN))
4012 return -EACCES;
4013
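	/*
	 * The buffer holds the firmware file name written to this sysfs
	 * attribute (for example, hypothetically: echo ibm-ucode.bin > update_fw);
	 * any trailing newline from echo is stripped before calling
	 * request_firmware().
	 */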
4014 snprintf(fname, sizeof(fname), "%s", buf);
4015
4016 endline = strchr(fname, '\n');
4017 if (endline)
4018 *endline = '\0';
4019
4020 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4021 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4022 return -EIO;
4023 }
4024
4025 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4026
4027 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4028 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4029 sglist = ipr_alloc_ucode_buffer(dnld_size);
4030
4031 if (!sglist) {
4032 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4033 release_firmware(fw_entry);
4034 return -ENOMEM;
4035 }
4036
4037 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4038
4039 if (result) {
4040 dev_err(&ioa_cfg->pdev->dev,
4041 "Microcode buffer copy to DMA buffer failed\n");
4042 goto out;
4043 }
4044
4045 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4046
4047 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4048
4049 if (!result)
4050 result = count;
4051 out:
4052 ipr_free_ucode_buffer(sglist);
4053 release_firmware(fw_entry);
4054 return result;
4055 }
4056
4057 static struct device_attribute ipr_update_fw_attr = {
4058 .attr = {
4059 .name = "update_fw",
4060 .mode = S_IWUSR,
4061 },
4062 .store = ipr_store_update_fw
4063 };
4064
4065 /**
4066 * ipr_show_fw_type - Show the adapter's firmware type.
4067 * @dev: class device struct
4068 * @buf: buffer
4069 *
4070 * Return value:
4071 * number of bytes printed to buffer
4072 **/
4073 static ssize_t ipr_show_fw_type(struct device *dev,
4074 struct device_attribute *attr, char *buf)
4075 {
4076 struct Scsi_Host *shost = class_to_shost(dev);
4077 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4078 unsigned long lock_flags = 0;
4079 int len;
4080
4081 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4082 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4083 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4084 return len;
4085 }
4086
4087 static struct device_attribute ipr_ioa_fw_type_attr = {
4088 .attr = {
4089 .name = "fw_type",
4090 .mode = S_IRUGO,
4091 },
4092 .show = ipr_show_fw_type
4093 };
4094
4095 static struct device_attribute *ipr_ioa_attrs[] = {
4096 &ipr_fw_version_attr,
4097 &ipr_log_level_attr,
4098 &ipr_diagnostics_attr,
4099 &ipr_ioa_state_attr,
4100 &ipr_ioa_reset_attr,
4101 &ipr_update_fw_attr,
4102 &ipr_ioa_fw_type_attr,
4103 &ipr_iopoll_weight_attr,
4104 NULL,
4105 };
4106
4107 #ifdef CONFIG_SCSI_IPR_DUMP
4108 /**
4109 * ipr_read_dump - Dump the adapter
4110 * @filp: open sysfs file
4111 * @kobj: kobject struct
4112 * @bin_attr: bin_attribute struct
4113 * @buf: buffer
4114 * @off: offset
4115 * @count: buffer size
4116 *
4117 * Return value:
4118 * number of bytes printed to buffer
4119 **/
4120 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4121 struct bin_attribute *bin_attr,
4122 char *buf, loff_t off, size_t count)
4123 {
4124 struct device *cdev = container_of(kobj, struct device, kobj);
4125 struct Scsi_Host *shost = class_to_shost(cdev);
4126 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4127 struct ipr_dump *dump;
4128 unsigned long lock_flags = 0;
4129 char *src;
4130 int len, sdt_end;
4131 size_t rc = count;
4132
4133 if (!capable(CAP_SYS_ADMIN))
4134 return -EACCES;
4135
4136 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4137 dump = ioa_cfg->dump;
4138
4139 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4140 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4141 return 0;
4142 }
4143 kref_get(&dump->kref);
4144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4145
4146 if (off > dump->driver_dump.hdr.len) {
4147 kref_put(&dump->kref, ipr_release_dump);
4148 return 0;
4149 }
4150
4151 if (off + count > dump->driver_dump.hdr.len) {
4152 count = dump->driver_dump.hdr.len - off;
4153 rc = count;
4154 }
4155
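	/*
	 * The dump image is laid out as three consecutive regions: the driver
	 * dump header, the smart dump table, and the paged IOA dump data.
	 * Copy from whichever regions the requested offset and count intersect.
	 */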
4156 if (count && off < sizeof(dump->driver_dump)) {
4157 if (off + count > sizeof(dump->driver_dump))
4158 len = sizeof(dump->driver_dump) - off;
4159 else
4160 len = count;
4161 src = (u8 *)&dump->driver_dump + off;
4162 memcpy(buf, src, len);
4163 buf += len;
4164 off += len;
4165 count -= len;
4166 }
4167
4168 off -= sizeof(dump->driver_dump);
4169
4170 if (ioa_cfg->sis64)
4171 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4172 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4173 sizeof(struct ipr_sdt_entry));
4174 else
4175 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4176 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4177
4178 if (count && off < sdt_end) {
4179 if (off + count > sdt_end)
4180 len = sdt_end - off;
4181 else
4182 len = count;
4183 src = (u8 *)&dump->ioa_dump + off;
4184 memcpy(buf, src, len);
4185 buf += len;
4186 off += len;
4187 count -= len;
4188 }
4189
4190 off -= sdt_end;
4191
4192 while (count) {
4193 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4194 len = PAGE_ALIGN(off) - off;
4195 else
4196 len = count;
4197 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4198 src += off & ~PAGE_MASK;
4199 memcpy(buf, src, len);
4200 buf += len;
4201 off += len;
4202 count -= len;
4203 }
4204
4205 kref_put(&dump->kref, ipr_release_dump);
4206 return rc;
4207 }
4208
4209 /**
4210 * ipr_alloc_dump - Prepare for adapter dump
4211 * @ioa_cfg: ioa config struct
4212 *
4213 * Return value:
4214 * 0 on success / other on failure
4215 **/
4216 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4217 {
4218 struct ipr_dump *dump;
4219 __be32 **ioa_data;
4220 unsigned long lock_flags = 0;
4221
4222 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4223
4224 if (!dump) {
4225 ipr_err("Dump memory allocation failed\n");
4226 return -ENOMEM;
4227 }
4228
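	/*
	 * Only the array of page pointers is allocated here; the dump data
	 * pages themselves are allocated on demand while the dump is fetched
	 * (and freed up to next_page_index in ipr_release_dump).
	 */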
4229 if (ioa_cfg->sis64)
4230 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4231 else
4232 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4233
4234 if (!ioa_data) {
4235 ipr_err("Dump memory allocation failed\n");
4236 kfree(dump);
4237 return -ENOMEM;
4238 }
4239
4240 dump->ioa_dump.ioa_data = ioa_data;
4241
4242 kref_init(&dump->kref);
4243 dump->ioa_cfg = ioa_cfg;
4244
4245 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4246
4247 if (INACTIVE != ioa_cfg->sdt_state) {
4248 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4249 vfree(dump->ioa_dump.ioa_data);
4250 kfree(dump);
4251 return 0;
4252 }
4253
4254 ioa_cfg->dump = dump;
4255 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4256 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4257 ioa_cfg->dump_taken = 1;
4258 schedule_work(&ioa_cfg->work_q);
4259 }
4260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4261
4262 return 0;
4263 }
4264
4265 /**
4266 * ipr_free_dump - Free adapter dump memory
4267 * @ioa_cfg: ioa config struct
4268 *
4269 * Return value:
4270 * 0 on success / other on failure
4271 **/
4272 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4273 {
4274 struct ipr_dump *dump;
4275 unsigned long lock_flags = 0;
4276
4277 ENTER;
4278
4279 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4280 dump = ioa_cfg->dump;
4281 if (!dump) {
4282 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4283 return 0;
4284 }
4285
4286 ioa_cfg->dump = NULL;
4287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4288
4289 kref_put(&dump->kref, ipr_release_dump);
4290
4291 LEAVE;
4292 return 0;
4293 }
4294
4295 /**
4296 * ipr_write_dump - Setup dump state of adapter
4297 * @filp: open sysfs file
4298 * @kobj: kobject struct
4299 * @bin_attr: bin_attribute struct
4300 * @buf: buffer
4301 * @off: offset
4302 * @count: buffer size
4303 *
4304 * Return value:
4305  * 	count on success / other on failure
4306 **/
4307 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4308 struct bin_attribute *bin_attr,
4309 char *buf, loff_t off, size_t count)
4310 {
4311 struct device *cdev = container_of(kobj, struct device, kobj);
4312 struct Scsi_Host *shost = class_to_shost(cdev);
4313 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4314 int rc;
4315
4316 if (!capable(CAP_SYS_ADMIN))
4317 return -EACCES;
4318
4319 if (buf[0] == '1')
4320 rc = ipr_alloc_dump(ioa_cfg);
4321 else if (buf[0] == '0')
4322 rc = ipr_free_dump(ioa_cfg);
4323 else
4324 return -EINVAL;
4325
4326 if (rc)
4327 return rc;
4328 else
4329 return count;
4330 }
4331
4332 static struct bin_attribute ipr_dump_attr = {
4333 .attr = {
4334 .name = "dump",
4335 .mode = S_IRUSR | S_IWUSR,
4336 },
4337 .size = 0,
4338 .read = ipr_read_dump,
4339 .write = ipr_write_dump
4340 };
4341 #else
4342 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4343 #endif
4344
4345 /**
4346 * ipr_change_queue_depth - Change the device's queue depth
4347 * @sdev: scsi device struct
4348 * @qdepth: depth to set
4350 *
4351 * Return value:
4352 * actual depth set
4353 **/
4354 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4355 {
4356 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4357 struct ipr_resource_entry *res;
4358 unsigned long lock_flags = 0;
4359
4360 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4361 res = (struct ipr_resource_entry *)sdev->hostdata;
4362
4363 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4364 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4365 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4366
4367 scsi_change_queue_depth(sdev, qdepth);
4368 return sdev->queue_depth;
4369 }
4370
4371 /**
4372 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4373 * @dev: device struct
4374 * @attr: device attribute structure
4375 * @buf: buffer
4376 *
4377 * Return value:
4378 * number of bytes printed to buffer
4379 **/
4380 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4381 {
4382 struct scsi_device *sdev = to_scsi_device(dev);
4383 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4384 struct ipr_resource_entry *res;
4385 unsigned long lock_flags = 0;
4386 ssize_t len = -ENXIO;
4387
4388 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4389 res = (struct ipr_resource_entry *)sdev->hostdata;
4390 if (res)
4391 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4392 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4393 return len;
4394 }
4395
4396 static struct device_attribute ipr_adapter_handle_attr = {
4397 .attr = {
4398 .name = "adapter_handle",
4399 .mode = S_IRUSR,
4400 },
4401 .show = ipr_show_adapter_handle
4402 };
4403
4404 /**
4405 * ipr_show_resource_path - Show the resource path or the resource address for
4406 * this device.
4407 * @dev: device struct
4408 * @attr: device attribute structure
4409 * @buf: buffer
4410 *
4411 * Return value:
4412 * number of bytes printed to buffer
4413 **/
4414 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4415 {
4416 struct scsi_device *sdev = to_scsi_device(dev);
4417 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4418 struct ipr_resource_entry *res;
4419 unsigned long lock_flags = 0;
4420 ssize_t len = -ENXIO;
4421 char buffer[IPR_MAX_RES_PATH_LENGTH];
4422
4423 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4424 res = (struct ipr_resource_entry *)sdev->hostdata;
4425 if (res && ioa_cfg->sis64)
4426 len = snprintf(buf, PAGE_SIZE, "%s\n",
4427 __ipr_format_res_path(res->res_path, buffer,
4428 sizeof(buffer)));
4429 else if (res)
4430 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4431 res->bus, res->target, res->lun);
4432
4433 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4434 return len;
4435 }
4436
4437 static struct device_attribute ipr_resource_path_attr = {
4438 .attr = {
4439 .name = "resource_path",
4440 .mode = S_IRUGO,
4441 },
4442 .show = ipr_show_resource_path
4443 };
4444
4445 /**
4446 * ipr_show_device_id - Show the device_id for this device.
4447 * @dev: device struct
4448 * @attr: device attribute structure
4449 * @buf: buffer
4450 *
4451 * Return value:
4452 * number of bytes printed to buffer
4453 **/
4454 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4455 {
4456 struct scsi_device *sdev = to_scsi_device(dev);
4457 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4458 struct ipr_resource_entry *res;
4459 unsigned long lock_flags = 0;
4460 ssize_t len = -ENXIO;
4461
4462 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4463 res = (struct ipr_resource_entry *)sdev->hostdata;
4464 if (res && ioa_cfg->sis64)
4465 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4466 else if (res)
4467 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4468
4469 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4470 return len;
4471 }
4472
4473 static struct device_attribute ipr_device_id_attr = {
4474 .attr = {
4475 .name = "device_id",
4476 .mode = S_IRUGO,
4477 },
4478 .show = ipr_show_device_id
4479 };
4480
4481 /**
4482 * ipr_show_resource_type - Show the resource type for this device.
4483 * @dev: device struct
4484 * @attr: device attribute structure
4485 * @buf: buffer
4486 *
4487 * Return value:
4488 * number of bytes printed to buffer
4489 **/
4490 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4491 {
4492 struct scsi_device *sdev = to_scsi_device(dev);
4493 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4494 struct ipr_resource_entry *res;
4495 unsigned long lock_flags = 0;
4496 ssize_t len = -ENXIO;
4497
4498 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4499 res = (struct ipr_resource_entry *)sdev->hostdata;
4500
4501 if (res)
4502 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4503
4504 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4505 return len;
4506 }
4507
4508 static struct device_attribute ipr_resource_type_attr = {
4509 .attr = {
4510 .name = "resource_type",
4511 .mode = S_IRUGO,
4512 },
4513 .show = ipr_show_resource_type
4514 };
4515
4516 /**
4517 * ipr_show_raw_mode - Show the adapter's raw mode
4518 * @dev: class device struct
4519 * @buf: buffer
4520 *
4521 * Return value:
4522 * number of bytes printed to buffer
4523 **/
4524 static ssize_t ipr_show_raw_mode(struct device *dev,
4525 struct device_attribute *attr, char *buf)
4526 {
4527 struct scsi_device *sdev = to_scsi_device(dev);
4528 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4529 struct ipr_resource_entry *res;
4530 unsigned long lock_flags = 0;
4531 ssize_t len;
4532
4533 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4534 res = (struct ipr_resource_entry *)sdev->hostdata;
4535 if (res)
4536 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4537 else
4538 len = -ENXIO;
4539 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4540 return len;
4541 }
4542
4543 /**
4544 * ipr_store_raw_mode - Change the adapter's raw mode
4545 * @dev: class device struct
4546 * @buf: buffer
4547 *
4548 * Return value:
4549 * number of bytes printed to buffer
4550 **/
4551 static ssize_t ipr_store_raw_mode(struct device *dev,
4552 struct device_attribute *attr,
4553 const char *buf, size_t count)
4554 {
4555 struct scsi_device *sdev = to_scsi_device(dev);
4556 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4557 struct ipr_resource_entry *res;
4558 unsigned long lock_flags = 0;
4559 ssize_t len;
4560
4561 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4562 res = (struct ipr_resource_entry *)sdev->hostdata;
4563 if (res) {
4564 if (ipr_is_af_dasd_device(res)) {
4565 res->raw_mode = simple_strtoul(buf, NULL, 10);
4566 len = strlen(buf);
4567 if (res->sdev)
4568 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4569 res->raw_mode ? "enabled" : "disabled");
4570 } else
4571 len = -EINVAL;
4572 } else
4573 len = -ENXIO;
4574 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4575 return len;
4576 }
4577
4578 static struct device_attribute ipr_raw_mode_attr = {
4579 .attr = {
4580 .name = "raw_mode",
4581 .mode = S_IRUGO | S_IWUSR,
4582 },
4583 .show = ipr_show_raw_mode,
4584 .store = ipr_store_raw_mode
4585 };
4586
4587 static struct device_attribute *ipr_dev_attrs[] = {
4588 &ipr_adapter_handle_attr,
4589 &ipr_resource_path_attr,
4590 &ipr_device_id_attr,
4591 &ipr_resource_type_attr,
4592 &ipr_raw_mode_attr,
4593 NULL,
4594 };
4595
4596 /**
4597 * ipr_biosparam - Return the HSC mapping
4598 * @sdev: scsi device struct
4599 * @block_device: block device pointer
4600 * @capacity: capacity of the device
4601 * @parm: Array containing returned HSC values.
4602 *
4603 * This function generates the HSC parms that fdisk uses.
4604 * We want to make sure we return something that places partitions
4605 * on 4k boundaries for best performance with the IOA.
4606 *
4607 * Return value:
4608 * 0 on success
4609 **/
4610 static int ipr_biosparam(struct scsi_device *sdev,
4611 struct block_device *block_device,
4612 sector_t capacity, int *parm)
4613 {
4614 int heads, sectors;
4615 sector_t cylinders;
4616
4617 heads = 128;
4618 sectors = 32;
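	/* 128 heads * 32 sectors/track = 4096 sectors (2MB) per cylinder, so
	 * cylinder-aligned partitions start on 4k boundaries. */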
4619
4620 cylinders = capacity;
4621 sector_div(cylinders, (128 * 32));
4622
4623 /* return result */
4624 parm[0] = heads;
4625 parm[1] = sectors;
4626 parm[2] = cylinders;
4627
4628 return 0;
4629 }
4630
4631 /**
4632 * ipr_find_starget - Find target based on bus/target.
4633 * @starget: scsi target struct
4634 *
4635 * Return value:
4636 * resource entry pointer if found / NULL if not found
4637 **/
4638 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4639 {
4640 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4641 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4642 struct ipr_resource_entry *res;
4643
4644 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4645 if ((res->bus == starget->channel) &&
4646 (res->target == starget->id)) {
4647 return res;
4648 }
4649 }
4650
4651 return NULL;
4652 }
4653
4654 static struct ata_port_info sata_port_info;
4655
4656 /**
4657 * ipr_target_alloc - Prepare for commands to a SCSI target
4658 * @starget: scsi target struct
4659 *
4660 * If the device is a SATA device, this function allocates an
4661 * ATA port with libata, else it does nothing.
4662 *
4663 * Return value:
4664 * 0 on success / non-0 on failure
4665 **/
4666 static int ipr_target_alloc(struct scsi_target *starget)
4667 {
4668 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4669 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4670 struct ipr_sata_port *sata_port;
4671 struct ata_port *ap;
4672 struct ipr_resource_entry *res;
4673 unsigned long lock_flags;
4674
4675 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4676 res = ipr_find_starget(starget);
4677 starget->hostdata = NULL;
4678
4679 if (res && ipr_is_gata(res)) {
4680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4681 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4682 if (!sata_port)
4683 return -ENOMEM;
4684
4685 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4686 if (ap) {
4687 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4688 sata_port->ioa_cfg = ioa_cfg;
4689 sata_port->ap = ap;
4690 sata_port->res = res;
4691
4692 res->sata_port = sata_port;
4693 ap->private_data = sata_port;
4694 starget->hostdata = sata_port;
4695 } else {
4696 kfree(sata_port);
4697 return -ENOMEM;
4698 }
4699 }
4700 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4701
4702 return 0;
4703 }
4704
4705 /**
4706 * ipr_target_destroy - Destroy a SCSI target
4707 * @starget: scsi target struct
4708 *
4709 * If the device was a SATA device, this function frees the libata
4710 * ATA port, else it does nothing.
4711 *
4712 **/
4713 static void ipr_target_destroy(struct scsi_target *starget)
4714 {
4715 struct ipr_sata_port *sata_port = starget->hostdata;
4716 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4717 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4718
4719 if (ioa_cfg->sis64) {
4720 if (!ipr_find_starget(starget)) {
4721 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4722 clear_bit(starget->id, ioa_cfg->array_ids);
4723 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4724 clear_bit(starget->id, ioa_cfg->vset_ids);
4725 else if (starget->channel == 0)
4726 clear_bit(starget->id, ioa_cfg->target_ids);
4727 }
4728 }
4729
4730 if (sata_port) {
4731 starget->hostdata = NULL;
4732 ata_sas_port_destroy(sata_port->ap);
4733 kfree(sata_port);
4734 }
4735 }
4736
4737 /**
4738 * ipr_find_sdev - Find device based on bus/target/lun.
4739 * @sdev: scsi device struct
4740 *
4741 * Return value:
4742 * resource entry pointer if found / NULL if not found
4743 **/
4744 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4745 {
4746 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4747 struct ipr_resource_entry *res;
4748
4749 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4750 if ((res->bus == sdev->channel) &&
4751 (res->target == sdev->id) &&
4752 (res->lun == sdev->lun))
4753 return res;
4754 }
4755
4756 return NULL;
4757 }
4758
4759 /**
4760 * ipr_slave_destroy - Unconfigure a SCSI device
4761 * @sdev: scsi device struct
4762 *
4763 * Return value:
4764 * nothing
4765 **/
4766 static void ipr_slave_destroy(struct scsi_device *sdev)
4767 {
4768 struct ipr_resource_entry *res;
4769 struct ipr_ioa_cfg *ioa_cfg;
4770 unsigned long lock_flags = 0;
4771
4772 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4773
4774 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4775 res = (struct ipr_resource_entry *) sdev->hostdata;
4776 if (res) {
4777 if (res->sata_port)
4778 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4779 sdev->hostdata = NULL;
4780 res->sdev = NULL;
4781 res->sata_port = NULL;
4782 }
4783 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4784 }
4785
4786 /**
4787 * ipr_slave_configure - Configure a SCSI device
4788 * @sdev: scsi device struct
4789 *
4790 * This function configures the specified scsi device.
4791 *
4792 * Return value:
4793 * 0 on success
4794 **/
4795 static int ipr_slave_configure(struct scsi_device *sdev)
4796 {
4797 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4798 struct ipr_resource_entry *res;
4799 struct ata_port *ap = NULL;
4800 unsigned long lock_flags = 0;
4801 char buffer[IPR_MAX_RES_PATH_LENGTH];
4802
4803 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4804 res = sdev->hostdata;
4805 if (res) {
4806 if (ipr_is_af_dasd_device(res))
4807 sdev->type = TYPE_RAID;
4808 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4809 sdev->scsi_level = 4;
4810 sdev->no_uld_attach = 1;
4811 }
4812 if (ipr_is_vset_device(res)) {
4813 sdev->scsi_level = SCSI_SPC_3;
4814 blk_queue_rq_timeout(sdev->request_queue,
4815 IPR_VSET_RW_TIMEOUT);
4816 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4817 }
4818 if (ipr_is_gata(res) && res->sata_port)
4819 ap = res->sata_port->ap;
4820 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4821
4822 if (ap) {
4823 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4824 ata_sas_slave_configure(sdev, ap);
4825 }
4826
4827 if (ioa_cfg->sis64)
4828 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4829 ipr_format_res_path(ioa_cfg,
4830 res->res_path, buffer, sizeof(buffer)));
4831 return 0;
4832 }
4833 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4834 return 0;
4835 }
4836
4837 /**
4838 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4839 * @sdev: scsi device struct
4840 *
4841 * This function initializes an ATA port so that future commands
4842 * sent through queuecommand will work.
4843 *
4844 * Return value:
4845 * 0 on success
4846 **/
4847 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4848 {
4849 struct ipr_sata_port *sata_port = NULL;
4850 int rc = -ENXIO;
4851
4852 ENTER;
4853 if (sdev->sdev_target)
4854 sata_port = sdev->sdev_target->hostdata;
4855 if (sata_port) {
4856 rc = ata_sas_port_init(sata_port->ap);
4857 if (rc == 0)
4858 rc = ata_sas_sync_probe(sata_port->ap);
4859 }
4860
4861 if (rc)
4862 ipr_slave_destroy(sdev);
4863
4864 LEAVE;
4865 return rc;
4866 }
4867
4868 /**
4869 * ipr_slave_alloc - Prepare for commands to a device.
4870 * @sdev: scsi device struct
4871 *
4872 * This function saves a pointer to the resource entry
4873 * in the scsi device struct if the device exists. We
4874 * can then use this pointer in ipr_queuecommand when
4875 * handling new commands.
4876 *
4877 * Return value:
4878 * 0 on success / -ENXIO if device does not exist
4879 **/
4880 static int ipr_slave_alloc(struct scsi_device *sdev)
4881 {
4882 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4883 struct ipr_resource_entry *res;
4884 unsigned long lock_flags;
4885 int rc = -ENXIO;
4886
4887 sdev->hostdata = NULL;
4888
4889 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4890
4891 res = ipr_find_sdev(sdev);
4892 if (res) {
4893 res->sdev = sdev;
4894 res->add_to_ml = 0;
4895 res->in_erp = 0;
4896 sdev->hostdata = res;
4897 if (!ipr_is_naca_model(res))
4898 res->needs_sync_complete = 1;
4899 rc = 0;
4900 if (ipr_is_gata(res)) {
4901 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4902 return ipr_ata_slave_alloc(sdev);
4903 }
4904 }
4905
4906 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4907
4908 return rc;
4909 }
4910
4911 /**
4912 * ipr_match_lun - Match function for specified LUN
4913 * @ipr_cmd: ipr command struct
4914 * @device: device to match (sdev)
4915 *
4916 * Returns:
4917 * 1 if command matches sdev / 0 if command does not match sdev
4918 **/
4919 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4920 {
4921 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4922 return 1;
4923 return 0;
4924 }
4925
4926 /**
4927 * ipr_wait_for_ops - Wait for matching commands to complete
4928  * @ioa_cfg:	ioa config struct
4929 * @device: device to match (sdev)
4930 * @match: match function to use
4931 *
4932 * Returns:
4933 * SUCCESS / FAILED
4934 **/
4935 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4936 int (*match)(struct ipr_cmnd *, void *))
4937 {
4938 struct ipr_cmnd *ipr_cmd;
4939 int wait;
4940 unsigned long flags;
4941 struct ipr_hrr_queue *hrrq;
4942 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4943 DECLARE_COMPLETION_ONSTACK(comp);
4944
4945 ENTER;
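	/*
	 * Tag every matching command still on a pending queue with our
	 * completion and wait for each to finish.  If the wait times out,
	 * clear the completion pointers and fail if anything is still pending.
	 */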
4946 do {
4947 wait = 0;
4948
4949 for_each_hrrq(hrrq, ioa_cfg) {
4950 spin_lock_irqsave(hrrq->lock, flags);
4951 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4952 if (match(ipr_cmd, device)) {
4953 ipr_cmd->eh_comp = ∁
4954 wait++;
4955 }
4956 }
4957 spin_unlock_irqrestore(hrrq->lock, flags);
4958 }
4959
4960 if (wait) {
4961 timeout = wait_for_completion_timeout(&comp, timeout);
4962
4963 if (!timeout) {
4964 wait = 0;
4965
4966 for_each_hrrq(hrrq, ioa_cfg) {
4967 spin_lock_irqsave(hrrq->lock, flags);
4968 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4969 if (match(ipr_cmd, device)) {
4970 ipr_cmd->eh_comp = NULL;
4971 wait++;
4972 }
4973 }
4974 spin_unlock_irqrestore(hrrq->lock, flags);
4975 }
4976
4977 if (wait)
4978 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4979 LEAVE;
4980 return wait ? FAILED : SUCCESS;
4981 }
4982 }
4983 } while (wait);
4984
4985 LEAVE;
4986 return SUCCESS;
4987 }
4988
4989 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4990 {
4991 struct ipr_ioa_cfg *ioa_cfg;
4992 unsigned long lock_flags = 0;
4993 int rc = SUCCESS;
4994
4995 ENTER;
4996 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4997 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4998
4999 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5000 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5001 dev_err(&ioa_cfg->pdev->dev,
5002 "Adapter being reset as a result of error recovery.\n");
5003
5004 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5005 ioa_cfg->sdt_state = GET_DUMP;
5006 }
5007
5008 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5009 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5010 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5011
5012 	/* If we got hit with a host reset while we were already resetting
5013 	 the adapter for some reason, and that reset failed, report failure. */
5014 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5015 ipr_trace;
5016 rc = FAILED;
5017 }
5018
5019 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5020 LEAVE;
5021 return rc;
5022 }
5023
5024 /**
5025 * ipr_device_reset - Reset the device
5026 * @ioa_cfg: ioa config struct
5027 * @res: resource entry struct
5028 *
5029 * This function issues a device reset to the affected device.
5030 * If the device is a SCSI device, a LUN reset will be sent
5031 * to the device first. If that does not work, a target reset
5032 * will be sent. If the device is a SATA device, a PHY reset will
5033 * be sent.
5034 *
5035 * Return value:
5036 * 0 on success / non-zero on failure
5037 **/
5038 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5039 struct ipr_resource_entry *res)
5040 {
5041 struct ipr_cmnd *ipr_cmd;
5042 struct ipr_ioarcb *ioarcb;
5043 struct ipr_cmd_pkt *cmd_pkt;
5044 struct ipr_ioarcb_ata_regs *regs;
5045 u32 ioasc;
5046
5047 ENTER;
5048 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5049 ioarcb = &ipr_cmd->ioarcb;
5050 cmd_pkt = &ioarcb->cmd_pkt;
5051
5052 if (ipr_cmd->ioa_cfg->sis64) {
5053 regs = &ipr_cmd->i.ata_ioadl.regs;
5054 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5055 } else
5056 regs = &ioarcb->u.add_data.u.regs;
5057
5058 ioarcb->res_handle = res->res_handle;
5059 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5060 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5061 if (ipr_is_gata(res)) {
5062 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5063 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5064 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5065 }
5066
5067 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5068 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5069 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5070 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5071 if (ipr_cmd->ioa_cfg->sis64)
5072 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5073 sizeof(struct ipr_ioasa_gata));
5074 else
5075 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5076 sizeof(struct ipr_ioasa_gata));
5077 }
5078
5079 LEAVE;
5080 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5081 }
5082
5083 /**
5084 * ipr_sata_reset - Reset the SATA port
5085 * @link: SATA link to reset
5086 * @classes: class of the attached device
5087 *
5088 * This function issues a SATA phy reset to the affected ATA link.
5089 *
5090 * Return value:
5091 * 0 on success / non-zero on failure
5092 **/
5093 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5094 unsigned long deadline)
5095 {
5096 struct ipr_sata_port *sata_port = link->ap->private_data;
5097 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5098 struct ipr_resource_entry *res;
5099 unsigned long lock_flags = 0;
5100 int rc = -ENXIO;
5101
5102 ENTER;
5103 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5104 while (ioa_cfg->in_reset_reload) {
5105 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5106 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5108 }
5109
5110 res = sata_port->res;
5111 if (res) {
5112 rc = ipr_device_reset(ioa_cfg, res);
5113 *classes = res->ata_class;
5114 }
5115
5116 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5117 LEAVE;
5118 return rc;
5119 }
5120
5121 /**
5122  * __ipr_eh_dev_reset - Reset the device
5123 * @scsi_cmd: scsi command struct
5124 *
5125 * This function issues a device reset to the affected device.
5126 * A LUN reset will be sent to the device first. If that does
5127 * not work, a target reset will be sent.
5128 *
5129 * Return value:
5130 * SUCCESS / FAILED
5131 **/
5132 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5133 {
5134 struct ipr_cmnd *ipr_cmd;
5135 struct ipr_ioa_cfg *ioa_cfg;
5136 struct ipr_resource_entry *res;
5137 struct ata_port *ap;
5138 int rc = 0;
5139 struct ipr_hrr_queue *hrrq;
5140
5141 ENTER;
5142 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5143 res = scsi_cmd->device->hostdata;
5144
5145 if (!res)
5146 return FAILED;
5147
5148 /*
5149 * If we are currently going through reset/reload, return failed. This will force the
5150 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5151 * reset to complete
5152 */
5153 if (ioa_cfg->in_reset_reload)
5154 return FAILED;
5155 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5156 return FAILED;
5157
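	/*
	 * Point the done handlers of every op still outstanding against this
	 * resource at the SCSI/SATA error-handling done routines, so that
	 * when the reset completes those ops are failed back to the midlayer
	 * instead of taking the normal completion path.
	 */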
5158 for_each_hrrq(hrrq, ioa_cfg) {
5159 spin_lock(&hrrq->_lock);
5160 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5161 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5162 if (ipr_cmd->scsi_cmd)
5163 ipr_cmd->done = ipr_scsi_eh_done;
5164 if (ipr_cmd->qc)
5165 ipr_cmd->done = ipr_sata_eh_done;
5166 if (ipr_cmd->qc &&
5167 !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5168 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5169 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5170 }
5171 }
5172 }
5173 spin_unlock(&hrrq->_lock);
5174 }
5175 res->resetting_device = 1;
5176 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5177
5178 if (ipr_is_gata(res) && res->sata_port) {
5179 ap = res->sata_port->ap;
5180 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5181 ata_std_error_handler(ap);
5182 spin_lock_irq(scsi_cmd->device->host->host_lock);
5183
5184 for_each_hrrq(hrrq, ioa_cfg) {
5185 spin_lock(&hrrq->_lock);
5186 list_for_each_entry(ipr_cmd,
5187 &hrrq->hrrq_pending_q, queue) {
5188 if (ipr_cmd->ioarcb.res_handle ==
5189 res->res_handle) {
5190 rc = -EIO;
5191 break;
5192 }
5193 }
5194 spin_unlock(&hrrq->_lock);
5195 }
5196 } else
5197 rc = ipr_device_reset(ioa_cfg, res);
5198 res->resetting_device = 0;
5199 res->reset_occurred = 1;
5200
5201 LEAVE;
5202 return rc ? FAILED : SUCCESS;
5203 }
5204
5205 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5206 {
5207 int rc;
5208 struct ipr_ioa_cfg *ioa_cfg;
5209
5210 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5211
5212 spin_lock_irq(cmd->device->host->host_lock);
5213 rc = __ipr_eh_dev_reset(cmd);
5214 spin_unlock_irq(cmd->device->host->host_lock);
5215
5216 if (rc == SUCCESS)
5217 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5218
5219 return rc;
5220 }
5221
5222 /**
5223 * ipr_bus_reset_done - Op done function for bus reset.
5224 * @ipr_cmd: ipr command struct
5225 *
5226 * This function is the op done function for a bus reset
5227 *
5228 * Return value:
5229 * none
5230 **/
5231 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5232 {
5233 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5234 struct ipr_resource_entry *res;
5235
5236 ENTER;
5237 if (!ioa_cfg->sis64)
5238 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5239 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5240 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5241 break;
5242 }
5243 }
5244
5245 /*
5246 * If abort has not completed, indicate the reset has, else call the
5247 * abort's done function to wake the sleeping eh thread
5248 */
5249 if (ipr_cmd->sibling->sibling)
5250 ipr_cmd->sibling->sibling = NULL;
5251 else
5252 ipr_cmd->sibling->done(ipr_cmd->sibling);
5253
5254 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5255 LEAVE;
5256 }
5257
5258 /**
5259 * ipr_abort_timeout - An abort task has timed out
5260 * @ipr_cmd: ipr command struct
5261 *
5262 * This function handles when an abort task times out. If this
5263 * happens we issue a bus reset since we have resources tied
5264 * up that must be freed before returning to the midlayer.
5265 *
5266 * Return value:
5267 * none
5268 **/
5269 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5270 {
5271 struct ipr_cmnd *reset_cmd;
5272 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5273 struct ipr_cmd_pkt *cmd_pkt;
5274 unsigned long lock_flags = 0;
5275
5276 ENTER;
5277 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5278 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5279 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5280 return;
5281 }
5282
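	/*
	 * The timed out abort and the bus reset are cross-linked through
	 * ->sibling so that ipr_bus_reset_done() can tell whether the abort
	 * has already completed and only then wake the sleeping eh thread.
	 */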
5283 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5284 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5285 ipr_cmd->sibling = reset_cmd;
5286 reset_cmd->sibling = ipr_cmd;
5287 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5288 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5289 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5290 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5291 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5292
5293 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5295 LEAVE;
5296 }
5297
5298 /**
5299 * ipr_cancel_op - Cancel specified op
5300 * @scsi_cmd: scsi command struct
5301 *
5302  * This function cancels the specified op.
5303 *
5304 * Return value:
5305 * SUCCESS / FAILED
5306 **/
5307 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5308 {
5309 struct ipr_cmnd *ipr_cmd;
5310 struct ipr_ioa_cfg *ioa_cfg;
5311 struct ipr_resource_entry *res;
5312 struct ipr_cmd_pkt *cmd_pkt;
5313 u32 ioasc, int_reg;
5314 int op_found = 0;
5315 struct ipr_hrr_queue *hrrq;
5316
5317 ENTER;
5318 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5319 res = scsi_cmd->device->hostdata;
5320
5321 /* If we are currently going through reset/reload, return failed.
5322 * This will force the mid-layer to call ipr_eh_host_reset,
5323 * which will then go to sleep and wait for the reset to complete
5324 */
5325 if (ioa_cfg->in_reset_reload ||
5326 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5327 return FAILED;
5328 if (!res)
5329 return FAILED;
5330
5331 /*
5332 	 * If we are aborting a timed out op, chances are that the timeout was caused
5333 	 * by an EEH error that has not yet been detected. In such cases, reading a register will
5334 * trigger the EEH recovery infrastructure.
5335 */
5336 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5337
5338 if (!ipr_is_gscsi(res))
5339 return FAILED;
5340
5341 for_each_hrrq(hrrq, ioa_cfg) {
5342 spin_lock(&hrrq->_lock);
5343 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5344 if (ipr_cmd->scsi_cmd == scsi_cmd) {
5345 ipr_cmd->done = ipr_scsi_eh_done;
5346 op_found = 1;
5347 break;
5348 }
5349 }
5350 spin_unlock(&hrrq->_lock);
5351 }
5352
5353 if (!op_found)
5354 return SUCCESS;
5355
5356 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5357 ipr_cmd->ioarcb.res_handle = res->res_handle;
5358 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5359 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5360 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5361 ipr_cmd->u.sdev = scsi_cmd->device;
5362
5363 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5364 scsi_cmd->cmnd[0]);
5365 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5366 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5367
5368 /*
5369 * If the abort task timed out and we sent a bus reset, we will get
5370 	 * one of the following responses to the abort
5371 */
5372 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5373 ioasc = 0;
5374 ipr_trace;
5375 }
5376
5377 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5378 if (!ipr_is_naca_model(res))
5379 res->needs_sync_complete = 1;
5380
5381 LEAVE;
5382 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5383 }
5384
5385 /**
5386  * ipr_scan_finished - Determine if the device scan is done
5387  * @shost:	scsi host struct
5388 *
5389 * Return value:
5390 * 0 if scan in progress / 1 if scan is complete
5391 **/
5392 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5393 {
5394 unsigned long lock_flags;
5395 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5396 int rc = 0;
5397
5398 spin_lock_irqsave(shost->host_lock, lock_flags);
5399 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5400 rc = 1;
5401 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5402 rc = 1;
5403 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5404 return rc;
5405 }
5406
5407 /**
5408  * ipr_eh_abort - Abort a single op
5409 * @scsi_cmd: scsi command struct
5410 *
5411 * Return value:
5412 * SUCCESS / FAILED
5413 **/
5414 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5415 {
5416 unsigned long flags;
5417 int rc;
5418 struct ipr_ioa_cfg *ioa_cfg;
5419
5420 ENTER;
5421
5422 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5423
5424 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5425 rc = ipr_cancel_op(scsi_cmd);
5426 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5427
5428 if (rc == SUCCESS)
5429 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5430 LEAVE;
5431 return rc;
5432 }
5433
5434 /**
5435 * ipr_handle_other_interrupt - Handle "other" interrupts
5436 * @ioa_cfg: ioa config struct
5437 * @int_reg: interrupt register
5438 *
5439 * Return value:
5440 * IRQ_NONE / IRQ_HANDLED
5441 **/
5442 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5443 u32 int_reg)
5444 {
5445 irqreturn_t rc = IRQ_HANDLED;
5446 u32 int_mask_reg;
5447
5448 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5449 int_reg &= ~int_mask_reg;
5450
5451 	/* If no operational interrupt occurred on the adapter, ignore it,
5452 	 * unless this is a SIS64 adapter, in which case check for a stage change interrupt.
5453 */
5454 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5455 if (ioa_cfg->sis64) {
5456 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5457 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5458 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5459
5460 /* clear stage change */
5461 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5462 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5463 list_del(&ioa_cfg->reset_cmd->queue);
5464 del_timer(&ioa_cfg->reset_cmd->timer);
5465 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5466 return IRQ_HANDLED;
5467 }
5468 }
5469
5470 return IRQ_NONE;
5471 }
5472
5473 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5474 /* Mask the interrupt */
5475 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5476 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5477
5478 list_del(&ioa_cfg->reset_cmd->queue);
5479 del_timer(&ioa_cfg->reset_cmd->timer);
5480 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5481 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5482 if (ioa_cfg->clear_isr) {
5483 if (ipr_debug && printk_ratelimit())
5484 dev_err(&ioa_cfg->pdev->dev,
5485 "Spurious interrupt detected. 0x%08X\n", int_reg);
5486 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5487 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5488 return IRQ_NONE;
5489 }
5490 } else {
5491 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5492 ioa_cfg->ioa_unit_checked = 1;
5493 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5494 dev_err(&ioa_cfg->pdev->dev,
5495 "No Host RRQ. 0x%08X\n", int_reg);
5496 else
5497 dev_err(&ioa_cfg->pdev->dev,
5498 "Permanent IOA failure. 0x%08X\n", int_reg);
5499
5500 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5501 ioa_cfg->sdt_state = GET_DUMP;
5502
5503 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5504 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5505 }
5506
5507 return rc;
5508 }
5509
5510 /**
5511 * ipr_isr_eh - Interrupt service routine error handler
5512 * @ioa_cfg: ioa config struct
5513  * @msg:		message to log, with @number appended
5514 *
5515 * Return value:
5516 * none
5517 **/
5518 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5519 {
5520 ioa_cfg->errors_logged++;
5521 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5522
5523 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5524 ioa_cfg->sdt_state = GET_DUMP;
5525
5526 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5527 }
5528
5529 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5530 struct list_head *doneq)
5531 {
5532 u32 ioasc;
5533 u16 cmd_index;
5534 struct ipr_cmnd *ipr_cmd;
5535 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5536 int num_hrrq = 0;
5537
5538 /* If interrupts are disabled, ignore the interrupt */
5539 if (!hrr_queue->allow_interrupts)
5540 return 0;
5541
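	/*
	 * The HRRQ is a circular queue of response handles written by the
	 * adapter.  A toggle bit in each entry flips every time the queue
	 * wraps, which is how the host tells newly written entries from
	 * stale ones.
	 */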
5542 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5543 hrr_queue->toggle_bit) {
5544
5545 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5546 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5547 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5548
5549 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5550 cmd_index < hrr_queue->min_cmd_id)) {
5551 ipr_isr_eh(ioa_cfg,
5552 "Invalid response handle from IOA: ",
5553 cmd_index);
5554 break;
5555 }
5556
5557 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5558 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5559
5560 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5561
5562 list_move_tail(&ipr_cmd->queue, doneq);
5563
5564 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5565 hrr_queue->hrrq_curr++;
5566 } else {
5567 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5568 hrr_queue->toggle_bit ^= 1u;
5569 }
5570 num_hrrq++;
5571 if (budget > 0 && num_hrrq >= budget)
5572 break;
5573 }
5574
5575 return num_hrrq;
5576 }
5577
5578 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5579 {
5580 struct ipr_ioa_cfg *ioa_cfg;
5581 struct ipr_hrr_queue *hrrq;
5582 struct ipr_cmnd *ipr_cmd, *temp;
5583 unsigned long hrrq_flags;
5584 int completed_ops;
5585 LIST_HEAD(doneq);
5586
5587 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5588 ioa_cfg = hrrq->ioa_cfg;
5589
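	/*
	 * Standard poll-budget behavior: consume at most "budget" responses.
	 * If fewer than that were pending, the queue has been drained, so
	 * complete the iopoll instance and let interrupts take over again.
	 */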
5590 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5591 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5592
5593 if (completed_ops < budget)
5594 blk_iopoll_complete(iop);
5595 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5596
5597 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5598 list_del(&ipr_cmd->queue);
5599 del_timer(&ipr_cmd->timer);
5600 ipr_cmd->fast_done(ipr_cmd);
5601 }
5602
5603 return completed_ops;
5604 }
5605
5606 /**
5607 * ipr_isr - Interrupt service routine
5608 * @irq: irq number
5609  * @devp:	pointer to the hrr queue (struct ipr_hrr_queue) to service
5610 *
5611 * Return value:
5612 * IRQ_NONE / IRQ_HANDLED
5613 **/
5614 static irqreturn_t ipr_isr(int irq, void *devp)
5615 {
5616 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5617 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5618 unsigned long hrrq_flags = 0;
5619 u32 int_reg = 0;
5620 int num_hrrq = 0;
5621 int irq_none = 0;
5622 struct ipr_cmnd *ipr_cmd, *temp;
5623 irqreturn_t rc = IRQ_NONE;
5624 LIST_HEAD(doneq);
5625
5626 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5627 /* If interrupts are disabled, ignore the interrupt */
5628 if (!hrrq->allow_interrupts) {
5629 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5630 return IRQ_NONE;
5631 }
5632
5633 while (1) {
5634 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5635 rc = IRQ_HANDLED;
5636
5637 if (!ioa_cfg->clear_isr)
5638 break;
5639
5640 /* Clear the PCI interrupt */
5641 num_hrrq = 0;
5642 do {
5643 writel(IPR_PCII_HRRQ_UPDATED,
5644 ioa_cfg->regs.clr_interrupt_reg32);
5645 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5646 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5647 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5648
5649 } else if (rc == IRQ_NONE && irq_none == 0) {
5650 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5651 irq_none++;
5652 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5653 int_reg & IPR_PCII_HRRQ_UPDATED) {
5654 ipr_isr_eh(ioa_cfg,
5655 "Error clearing HRRQ: ", num_hrrq);
5656 rc = IRQ_HANDLED;
5657 break;
5658 } else
5659 break;
5660 }
5661
5662 if (unlikely(rc == IRQ_NONE))
5663 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5664
5665 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5666 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5667 list_del(&ipr_cmd->queue);
5668 del_timer(&ipr_cmd->timer);
5669 ipr_cmd->fast_done(ipr_cmd);
5670 }
5671 return rc;
5672 }
5673
5674 /**
5675 * ipr_isr_mhrrq - Interrupt service routine
5676 * @irq: irq number
5677  * @devp:	pointer to the hrr queue (struct ipr_hrr_queue) to service
5678 *
5679 * Return value:
5680 * IRQ_NONE / IRQ_HANDLED
5681 **/
5682 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5683 {
5684 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5685 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5686 unsigned long hrrq_flags = 0;
5687 struct ipr_cmnd *ipr_cmd, *temp;
5688 irqreturn_t rc = IRQ_NONE;
5689 LIST_HEAD(doneq);
5690
5691 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5692
5693 /* If interrupts are disabled, ignore the interrupt */
5694 if (!hrrq->allow_interrupts) {
5695 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5696 return IRQ_NONE;
5697 }
5698
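	/*
	 * With SIS64, more than one MSI-X vector and a non-zero iopoll
	 * weight, completion processing is deferred to blk_iopoll (see
	 * ipr_iopoll()); otherwise the responses are processed inline,
	 * just as in ipr_isr().
	 */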
5699 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5700 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5701 hrrq->toggle_bit) {
5702 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5703 blk_iopoll_sched(&hrrq->iopoll);
5704 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5705 return IRQ_HANDLED;
5706 }
5707 } else {
5708 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5709 hrrq->toggle_bit)
5710
5711 if (ipr_process_hrrq(hrrq, -1, &doneq))
5712 rc = IRQ_HANDLED;
5713 }
5714
5715 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5716
5717 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5718 list_del(&ipr_cmd->queue);
5719 del_timer(&ipr_cmd->timer);
5720 ipr_cmd->fast_done(ipr_cmd);
5721 }
5722 return rc;
5723 }
5724
5725 /**
5726 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5727 * @ioa_cfg: ioa config struct
5728 * @ipr_cmd: ipr command struct
5729 *
5730 * Return value:
5731 * 0 on success / -1 on failure
5732 **/
5733 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5734 struct ipr_cmnd *ipr_cmd)
5735 {
5736 int i, nseg;
5737 struct scatterlist *sg;
5738 u32 length;
5739 u32 ioadl_flags = 0;
5740 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5741 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5742 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5743
5744 length = scsi_bufflen(scsi_cmd);
5745 if (!length)
5746 return 0;
5747
5748 nseg = scsi_dma_map(scsi_cmd);
5749 if (nseg < 0) {
5750 if (printk_ratelimit())
5751 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5752 return -1;
5753 }
5754
5755 ipr_cmd->dma_use_sg = nseg;
5756
5757 ioarcb->data_transfer_length = cpu_to_be32(length);
5758 ioarcb->ioadl_len =
5759 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5760
5761 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5762 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5763 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5764 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5765 ioadl_flags = IPR_IOADL_FLAGS_READ;
5766
5767 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5768 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5769 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5770 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5771 }
5772
5773 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5774 return 0;
5775 }
5776
5777 /**
5778 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5779 * @ioa_cfg: ioa config struct
5780 * @ipr_cmd: ipr command struct
5781 *
5782 * Return value:
5783 * 0 on success / -1 on failure
5784 **/
5785 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5786 struct ipr_cmnd *ipr_cmd)
5787 {
5788 int i, nseg;
5789 struct scatterlist *sg;
5790 u32 length;
5791 u32 ioadl_flags = 0;
5792 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5793 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5794 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5795
5796 length = scsi_bufflen(scsi_cmd);
5797 if (!length)
5798 return 0;
5799
5800 nseg = scsi_dma_map(scsi_cmd);
5801 if (nseg < 0) {
5802 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5803 return -1;
5804 }
5805
5806 ipr_cmd->dma_use_sg = nseg;
5807
5808 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5809 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5810 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5811 ioarcb->data_transfer_length = cpu_to_be32(length);
5812 ioarcb->ioadl_len =
5813 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5814 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5815 ioadl_flags = IPR_IOADL_FLAGS_READ;
5816 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5817 ioarcb->read_ioadl_len =
5818 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5819 }
5820
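	/*
	 * Short scatter/gather lists are embedded directly in the IOARCB's
	 * additional data area rather than referenced externally, which
	 * presumably saves the adapter a separate IOADL fetch from host
	 * memory.
	 */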
5821 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5822 ioadl = ioarcb->u.add_data.u.ioadl;
5823 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5824 offsetof(struct ipr_ioarcb, u.add_data));
5825 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5826 }
5827
5828 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5829 ioadl[i].flags_and_data_len =
5830 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5831 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5832 }
5833
5834 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5835 return 0;
5836 }
5837
5838 /**
5839 * ipr_erp_done - Process completion of ERP for a device
5840 * @ipr_cmd: ipr command struct
5841 *
5842  * This function copies the request sense data into the scsi_cmd
5843  * sense buffer and calls the scsi_done function.
5844 *
5845 * Return value:
5846 * nothing
5847 **/
5848 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5849 {
5850 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5851 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5852 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5853
5854 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5855 scsi_cmd->result |= (DID_ERROR << 16);
5856 scmd_printk(KERN_ERR, scsi_cmd,
5857 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5858 } else {
5859 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5860 SCSI_SENSE_BUFFERSIZE);
5861 }
5862
5863 if (res) {
5864 if (!ipr_is_naca_model(res))
5865 res->needs_sync_complete = 1;
5866 res->in_erp = 0;
5867 }
5868 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5869 scsi_cmd->scsi_done(scsi_cmd);
5870 if (ipr_cmd->eh_comp)
5871 complete(ipr_cmd->eh_comp);
5872 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5873 }
5874
5875 /**
5876 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5877 * @ipr_cmd: ipr command struct
5878 *
5879 * Return value:
5880 * none
5881 **/
5882 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5883 {
5884 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5885 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5886 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5887
5888 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5889 ioarcb->data_transfer_length = 0;
5890 ioarcb->read_data_transfer_length = 0;
5891 ioarcb->ioadl_len = 0;
5892 ioarcb->read_ioadl_len = 0;
5893 ioasa->hdr.ioasc = 0;
5894 ioasa->hdr.residual_data_len = 0;
5895
5896 if (ipr_cmd->ioa_cfg->sis64)
5897 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5898 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5899 else {
5900 ioarcb->write_ioadl_addr =
5901 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5902 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5903 }
5904 }
5905
5906 /**
5907 * ipr_erp_request_sense - Send request sense to a device
5908 * @ipr_cmd: ipr command struct
5909 *
5910 * This function sends a request sense to a device as a result
5911 * of a check condition.
5912 *
5913 * Return value:
5914 * nothing
5915 **/
5916 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5917 {
5918 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5919 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5920
5921 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5922 ipr_erp_done(ipr_cmd);
5923 return;
5924 }
5925
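	/*
	 * Build a REQUEST SENSE CDB and have the adapter DMA the sense data
	 * straight into this command's sense buffer; ipr_erp_done() later
	 * copies it into the midlayer scsi_cmnd.
	 */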
5926 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5927
5928 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5929 cmd_pkt->cdb[0] = REQUEST_SENSE;
5930 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5931 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5932 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5933 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5934
5935 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5936 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5937
5938 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5939 IPR_REQUEST_SENSE_TIMEOUT * 2);
5940 }
5941
5942 /**
5943 * ipr_erp_cancel_all - Send cancel all to a device
5944 * @ipr_cmd: ipr command struct
5945 *
5946 * This function sends a cancel all to a device to clear the
5947 * queue. If we are running TCQ on the device, QERR is set to 1,
5948 * which means all outstanding ops have been dropped on the floor.
5949 * Cancel all will return them to us.
5950 *
5951 * Return value:
5952 * nothing
5953 **/
5954 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5955 {
5956 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5957 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5958 struct ipr_cmd_pkt *cmd_pkt;
5959
5960 res->in_erp = 1;
5961
5962 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5963
5964 if (!scsi_cmd->device->simple_tags) {
5965 ipr_erp_request_sense(ipr_cmd);
5966 return;
5967 }
5968
5969 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5970 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5971 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5972
5973 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5974 IPR_CANCEL_ALL_TIMEOUT);
5975 }
5976
5977 /**
5978 * ipr_dump_ioasa - Dump contents of IOASA
5979 * @ioa_cfg: ioa config struct
5980 * @ipr_cmd: ipr command struct
5981 * @res: resource entry struct
5982 *
5983 * This function is invoked by the interrupt handler when ops
5984 * fail. It will log the IOASA if appropriate. Only called
5985 * for GPDD ops.
5986 *
5987 * Return value:
5988 * none
5989 **/
5990 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5991 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5992 {
5993 int i;
5994 u16 data_len;
5995 u32 ioasc, fd_ioasc;
5996 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5997 __be32 *ioasa_data = (__be32 *)ioasa;
5998 int error_index;
5999
6000 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6001 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
6002
6003 if (0 == ioasc)
6004 return;
6005
6006 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6007 return;
6008
6009 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6010 error_index = ipr_get_error(fd_ioasc);
6011 else
6012 error_index = ipr_get_error(ioasc);
6013
6014 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6015 /* Don't log an error if the IOA already logged one */
6016 if (ioasa->hdr.ilid != 0)
6017 return;
6018
6019 if (!ipr_is_gscsi(res))
6020 return;
6021
6022 if (ipr_error_table[error_index].log_ioasa == 0)
6023 return;
6024 }
6025
6026 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6027
6028 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6029 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6030 data_len = sizeof(struct ipr_ioasa64);
6031 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6032 data_len = sizeof(struct ipr_ioasa);
6033
6034 ipr_err("IOASA Dump:\n");
6035
6036 for (i = 0; i < data_len / 4; i += 4) {
6037 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6038 be32_to_cpu(ioasa_data[i]),
6039 be32_to_cpu(ioasa_data[i+1]),
6040 be32_to_cpu(ioasa_data[i+2]),
6041 be32_to_cpu(ioasa_data[i+3]));
6042 }
6043 }
6044
6045 /**
6046 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6047  * @ipr_cmd:	ipr command struct (source of the IOASA and of the
6048  *		scsi_cmd whose sense buffer is filled)
6049 *
6050 * Return value:
6051 * none
6052 **/
6053 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6054 {
6055 u32 failing_lba;
6056 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6057 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6058 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6059 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6060
6061 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6062
6063 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6064 return;
6065
6066 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6067
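	/*
	 * For volume sets whose failing LBA does not fit in fixed format
	 * sense data, build descriptor format sense (response code 0x72)
	 * with an information descriptor carrying the 64-bit failing LBA;
	 * all other cases get fixed format sense (response code 0x70).
	 */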
6068 if (ipr_is_vset_device(res) &&
6069 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6070 ioasa->u.vset.failing_lba_hi != 0) {
6071 sense_buf[0] = 0x72;
6072 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6073 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6074 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6075
6076 sense_buf[7] = 12;
6077 sense_buf[8] = 0;
6078 sense_buf[9] = 0x0A;
6079 sense_buf[10] = 0x80;
6080
6081 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6082
6083 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6084 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6085 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6086 sense_buf[15] = failing_lba & 0x000000ff;
6087
6088 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6089
6090 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6091 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6092 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6093 sense_buf[19] = failing_lba & 0x000000ff;
6094 } else {
6095 sense_buf[0] = 0x70;
6096 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6097 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6098 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6099
6100 /* Illegal request */
6101 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6102 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6103 sense_buf[7] = 10; /* additional length */
6104
6105 /* IOARCB was in error */
6106 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6107 sense_buf[15] = 0xC0;
6108 else /* Parameter data was invalid */
6109 sense_buf[15] = 0x80;
6110
6111 sense_buf[16] =
6112 ((IPR_FIELD_POINTER_MASK &
6113 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6114 sense_buf[17] =
6115 (IPR_FIELD_POINTER_MASK &
6116 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6117 } else {
6118 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6119 if (ipr_is_vset_device(res))
6120 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6121 else
6122 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6123
6124 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6125 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6126 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6127 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6128 sense_buf[6] = failing_lba & 0x000000ff;
6129 }
6130
6131 sense_buf[7] = 6; /* additional length */
6132 }
6133 }
6134 }
6135
6136 /**
6137 * ipr_get_autosense - Copy autosense data to sense buffer
6138 * @ipr_cmd: ipr command struct
6139 *
6140 * This function copies the autosense buffer to the buffer
6141 * in the scsi_cmd, if there is autosense available.
6142 *
6143 * Return value:
6144 * 1 if autosense was available / 0 if not
6145 **/
6146 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6147 {
6148 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6149 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6150
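	/*
	 * If the adapter flags autosense as valid, the sense data arrives in
	 * the IOASA itself, so no separate REQUEST SENSE round trip is
	 * needed; just copy it into the midlayer sense buffer.
	 */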
6151 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6152 return 0;
6153
6154 if (ipr_cmd->ioa_cfg->sis64)
6155 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6156 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6157 SCSI_SENSE_BUFFERSIZE));
6158 else
6159 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6160 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6161 SCSI_SENSE_BUFFERSIZE));
6162 return 1;
6163 }
6164
6165 /**
6166 * ipr_erp_start - Process an error response for a SCSI op
6167 * @ioa_cfg: ioa config struct
6168 * @ipr_cmd: ipr command struct
6169 *
6170 * This function determines whether or not to initiate ERP
6171 * on the affected device.
6172 *
6173 * Return value:
6174 * nothing
6175 **/
6176 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6177 struct ipr_cmnd *ipr_cmd)
6178 {
6179 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6180 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6181 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6182 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6183
6184 if (!res) {
6185 ipr_scsi_eh_done(ipr_cmd);
6186 return;
6187 }
6188
6189 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6190 ipr_gen_sense(ipr_cmd);
6191
6192 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6193
6194 switch (masked_ioasc) {
6195 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6196 if (ipr_is_naca_model(res))
6197 scsi_cmd->result |= (DID_ABORT << 16);
6198 else
6199 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6200 break;
6201 case IPR_IOASC_IR_RESOURCE_HANDLE:
6202 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6203 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6204 break;
6205 case IPR_IOASC_HW_SEL_TIMEOUT:
6206 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6207 if (!ipr_is_naca_model(res))
6208 res->needs_sync_complete = 1;
6209 break;
6210 case IPR_IOASC_SYNC_REQUIRED:
6211 if (!res->in_erp)
6212 res->needs_sync_complete = 1;
6213 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6214 break;
6215 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6216 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6217 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6218 break;
6219 case IPR_IOASC_BUS_WAS_RESET:
6220 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6221 /*
6222 		 * Report the bus reset and ask for a retry. The device
6223 		 * will return Check Condition/Unit Attention on the next command.
6224 */
6225 if (!res->resetting_device)
6226 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6227 scsi_cmd->result |= (DID_ERROR << 16);
6228 if (!ipr_is_naca_model(res))
6229 res->needs_sync_complete = 1;
6230 break;
6231 case IPR_IOASC_HW_DEV_BUS_STATUS:
6232 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6233 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6234 if (!ipr_get_autosense(ipr_cmd)) {
6235 if (!ipr_is_naca_model(res)) {
6236 ipr_erp_cancel_all(ipr_cmd);
6237 return;
6238 }
6239 }
6240 }
6241 if (!ipr_is_naca_model(res))
6242 res->needs_sync_complete = 1;
6243 break;
6244 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6245 break;
6246 case IPR_IOASC_IR_NON_OPTIMIZED:
6247 if (res->raw_mode) {
6248 res->raw_mode = 0;
6249 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6250 } else
6251 scsi_cmd->result |= (DID_ERROR << 16);
6252 break;
6253 default:
6254 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6255 scsi_cmd->result |= (DID_ERROR << 16);
6256 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6257 res->needs_sync_complete = 1;
6258 break;
6259 }
6260
6261 scsi_dma_unmap(ipr_cmd->scsi_cmd);
6262 scsi_cmd->scsi_done(scsi_cmd);
6263 if (ipr_cmd->eh_comp)
6264 complete(ipr_cmd->eh_comp);
6265 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6266 }
6267
6268 /**
6269 * ipr_scsi_done - mid-layer done function
6270 * @ipr_cmd: ipr command struct
6271 *
6272 * This function is invoked by the interrupt handler for
6273 * ops generated by the SCSI mid-layer
6274 *
6275 * Return value:
6276 * none
6277 **/
6278 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6279 {
6280 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6281 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6282 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6283 unsigned long lock_flags;
6284
6285 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6286
6287 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6288 scsi_dma_unmap(scsi_cmd);
6289
6290 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6291 scsi_cmd->scsi_done(scsi_cmd);
6292 if (ipr_cmd->eh_comp)
6293 complete(ipr_cmd->eh_comp);
6294 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6295 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6296 } else {
6297 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6298 spin_lock(&ipr_cmd->hrrq->_lock);
6299 ipr_erp_start(ioa_cfg, ipr_cmd);
6300 spin_unlock(&ipr_cmd->hrrq->_lock);
6301 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6302 }
6303 }
6304
6305 /**
6306 * ipr_queuecommand - Queue a mid-layer request
6307 * @shost: scsi host struct
6308 * @scsi_cmd: scsi command struct
6309 *
6310 * This function queues a request generated by the mid-layer.
6311 *
6312 * Return value:
6313 * 0 on success
6314 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6315 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6316 **/
6317 static int ipr_queuecommand(struct Scsi_Host *shost,
6318 struct scsi_cmnd *scsi_cmd)
6319 {
6320 struct ipr_ioa_cfg *ioa_cfg;
6321 struct ipr_resource_entry *res;
6322 struct ipr_ioarcb *ioarcb;
6323 struct ipr_cmnd *ipr_cmd;
6324 unsigned long hrrq_flags, lock_flags;
6325 int rc;
6326 struct ipr_hrr_queue *hrrq;
6327 int hrrq_id;
6328
6329 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6330
6331 scsi_cmd->result = (DID_OK << 16);
6332 res = scsi_cmd->device->hostdata;
6333
6334 if (ipr_is_gata(res) && res->sata_port) {
6335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6336 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6337 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6338 return rc;
6339 }
6340
6341 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6342 hrrq = &ioa_cfg->hrrq[hrrq_id];
6343
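	/*
	 * ipr_get_hrrq_index() spreads new commands across the available
	 * host RR queues, presumably so completions are distributed over
	 * the MSI-X vectors serving those queues.
	 */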
6344 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6345 /*
6346 	 * We are currently blocking all devices due to a host reset.
6347 * We have told the host to stop giving us new requests, but
6348 * ERP ops don't count. FIXME
6349 */
6350 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6351 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6352 return SCSI_MLQUEUE_HOST_BUSY;
6353 }
6354
6355 /*
6356 * FIXME - Create scsi_set_host_offline interface
6357 * and the ioa_is_dead check can be removed
6358 */
6359 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6360 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6361 goto err_nodev;
6362 }
6363
6364 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6365 if (ipr_cmd == NULL) {
6366 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6367 return SCSI_MLQUEUE_HOST_BUSY;
6368 }
6369 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6370
6371 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6372 ioarcb = &ipr_cmd->ioarcb;
6373
6374 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6375 ipr_cmd->scsi_cmd = scsi_cmd;
6376 ipr_cmd->done = ipr_scsi_eh_done;
6377
6378 if (ipr_is_gscsi(res)) {
6379 if (scsi_cmd->underflow == 0)
6380 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6381
6382 if (res->reset_occurred) {
6383 res->reset_occurred = 0;
6384 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6385 }
6386 }
6387
6388 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6389 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6390
6391 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6392 if (scsi_cmd->flags & SCMD_TAGGED)
6393 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6394 else
6395 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6396 }
6397
6398 if (scsi_cmd->cmnd[0] >= 0xC0 &&
6399 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6400 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6401 }
6402 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6403 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6404
6405 if (scsi_cmd->underflow == 0)
6406 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6407 }
6408
6409 if (ioa_cfg->sis64)
6410 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6411 else
6412 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6413
6414 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6415 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6416 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6417 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6418 if (!rc)
6419 scsi_dma_unmap(scsi_cmd);
6420 return SCSI_MLQUEUE_HOST_BUSY;
6421 }
6422
6423 if (unlikely(hrrq->ioa_is_dead)) {
6424 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6425 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6426 scsi_dma_unmap(scsi_cmd);
6427 goto err_nodev;
6428 }
6429
6430 ioarcb->res_handle = res->res_handle;
6431 if (res->needs_sync_complete) {
6432 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6433 res->needs_sync_complete = 0;
6434 }
6435 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6436 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6437 ipr_send_command(ipr_cmd);
6438 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6439 return 0;
6440
6441 err_nodev:
6442 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6443 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6444 scsi_cmd->result = (DID_NO_CONNECT << 16);
6445 scsi_cmd->scsi_done(scsi_cmd);
6446 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6447 return 0;
6448 }
6449
6450 /**
6451 * ipr_ioctl - IOCTL handler
6452 * @sdev: scsi device struct
6453 * @cmd: IOCTL cmd
6454 * @arg: IOCTL arg
6455 *
6456 * Return value:
6457 * 0 on success / other on failure
6458 **/
6459 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6460 {
6461 struct ipr_resource_entry *res;
6462
6463 res = (struct ipr_resource_entry *)sdev->hostdata;
6464 if (res && ipr_is_gata(res)) {
6465 if (cmd == HDIO_GET_IDENTITY)
6466 return -ENOTTY;
6467 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6468 }
6469
6470 return -EINVAL;
6471 }
6472
6473 /**
6474  * ipr_ioa_info - Get information about the card/driver
6475  * @host:	scsi host struct
6476 *
6477 * Return value:
6478 * pointer to buffer with description string
6479 **/
6480 static const char *ipr_ioa_info(struct Scsi_Host *host)
6481 {
6482 static char buffer[512];
6483 struct ipr_ioa_cfg *ioa_cfg;
6484 unsigned long lock_flags = 0;
6485
6486 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6487
6488 spin_lock_irqsave(host->host_lock, lock_flags);
6489 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6490 spin_unlock_irqrestore(host->host_lock, lock_flags);
6491
6492 return buffer;
6493 }
6494
6495 static struct scsi_host_template driver_template = {
6496 .module = THIS_MODULE,
6497 .name = "IPR",
6498 .info = ipr_ioa_info,
6499 .ioctl = ipr_ioctl,
6500 .queuecommand = ipr_queuecommand,
6501 .eh_abort_handler = ipr_eh_abort,
6502 .eh_device_reset_handler = ipr_eh_dev_reset,
6503 .eh_host_reset_handler = ipr_eh_host_reset,
6504 .slave_alloc = ipr_slave_alloc,
6505 .slave_configure = ipr_slave_configure,
6506 .slave_destroy = ipr_slave_destroy,
6507 .scan_finished = ipr_scan_finished,
6508 .target_alloc = ipr_target_alloc,
6509 .target_destroy = ipr_target_destroy,
6510 .change_queue_depth = ipr_change_queue_depth,
6511 .bios_param = ipr_biosparam,
6512 .can_queue = IPR_MAX_COMMANDS,
6513 .this_id = -1,
6514 .sg_tablesize = IPR_MAX_SGLIST,
6515 .max_sectors = IPR_IOA_MAX_SECTORS,
6516 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6517 .use_clustering = ENABLE_CLUSTERING,
6518 .shost_attrs = ipr_ioa_attrs,
6519 .sdev_attrs = ipr_dev_attrs,
6520 .proc_name = IPR_NAME,
6521 };
6522
6523 /**
6524 * ipr_ata_phy_reset - libata phy_reset handler
6525 * @ap: ata port to reset
6526 *
6527 **/
6528 static void ipr_ata_phy_reset(struct ata_port *ap)
6529 {
6530 unsigned long flags;
6531 struct ipr_sata_port *sata_port = ap->private_data;
6532 struct ipr_resource_entry *res = sata_port->res;
6533 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6534 int rc;
6535
6536 ENTER;
6537 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6538 while (ioa_cfg->in_reset_reload) {
6539 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6540 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6541 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6542 }
6543
6544 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6545 goto out_unlock;
6546
6547 rc = ipr_device_reset(ioa_cfg, res);
6548
6549 if (rc) {
6550 ap->link.device[0].class = ATA_DEV_NONE;
6551 goto out_unlock;
6552 }
6553
6554 ap->link.device[0].class = res->ata_class;
6555 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6556 ap->link.device[0].class = ATA_DEV_NONE;
6557
6558 out_unlock:
6559 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6560 LEAVE;
6561 }
6562
6563 /**
6564 * ipr_ata_post_internal - Cleanup after an internal command
6565 * @qc: ATA queued command
6566 *
6567 * Return value:
6568 * none
6569 **/
6570 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6571 {
6572 struct ipr_sata_port *sata_port = qc->ap->private_data;
6573 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6574 struct ipr_cmnd *ipr_cmd;
6575 struct ipr_hrr_queue *hrrq;
6576 unsigned long flags;
6577
6578 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6579 while (ioa_cfg->in_reset_reload) {
6580 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6581 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6582 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6583 }
6584
6585 for_each_hrrq(hrrq, ioa_cfg) {
6586 spin_lock(&hrrq->_lock);
6587 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6588 if (ipr_cmd->qc == qc) {
6589 ipr_device_reset(ioa_cfg, sata_port->res);
6590 break;
6591 }
6592 }
6593 spin_unlock(&hrrq->_lock);
6594 }
6595 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6596 }
6597
6598 /**
6599 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6600 * @regs: destination
6601 * @tf: source ATA taskfile
6602 *
6603 * Return value:
6604 * none
6605 **/
6606 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6607 struct ata_taskfile *tf)
6608 {
6609 regs->feature = tf->feature;
6610 regs->nsect = tf->nsect;
6611 regs->lbal = tf->lbal;
6612 regs->lbam = tf->lbam;
6613 regs->lbah = tf->lbah;
6614 regs->device = tf->device;
6615 regs->command = tf->command;
6616 regs->hob_feature = tf->hob_feature;
6617 regs->hob_nsect = tf->hob_nsect;
6618 regs->hob_lbal = tf->hob_lbal;
6619 regs->hob_lbam = tf->hob_lbam;
6620 regs->hob_lbah = tf->hob_lbah;
6621 regs->ctl = tf->ctl;
6622 }
6623
6624 /**
6625 * ipr_sata_done - done function for SATA commands
6626 * @ipr_cmd: ipr command struct
6627 *
6628 * This function is invoked by the interrupt handler for
6629 * ops generated by the SCSI mid-layer to SATA devices
6630 *
6631 * Return value:
6632 * none
6633 **/
6634 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6635 {
6636 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6637 struct ata_queued_cmd *qc = ipr_cmd->qc;
6638 struct ipr_sata_port *sata_port = qc->ap->private_data;
6639 struct ipr_resource_entry *res = sata_port->res;
6640 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6641
6642 spin_lock(&ipr_cmd->hrrq->_lock);
6643 if (ipr_cmd->ioa_cfg->sis64)
6644 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6645 sizeof(struct ipr_ioasa_gata));
6646 else
6647 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6648 sizeof(struct ipr_ioasa_gata));
6649 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6650
6651 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6652 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6653
6654 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6655 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6656 else
6657 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6658 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6659 spin_unlock(&ipr_cmd->hrrq->_lock);
6660 ata_qc_complete(qc);
6661 }
6662
6663 /**
6664 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6665 * @ipr_cmd: ipr command struct
6666 * @qc: ATA queued command
6667 *
6668 **/
6669 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6670 struct ata_queued_cmd *qc)
6671 {
6672 u32 ioadl_flags = 0;
6673 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6674 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6675 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6676 int len = qc->nbytes;
6677 struct scatterlist *sg;
6678 unsigned int si;
6679 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6680
6681 if (len == 0)
6682 return;
6683
6684 if (qc->dma_dir == DMA_TO_DEVICE) {
6685 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6686 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6687 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6688 ioadl_flags = IPR_IOADL_FLAGS_READ;
6689
6690 ioarcb->data_transfer_length = cpu_to_be32(len);
6691 ioarcb->ioadl_len =
6692 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6693 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6694 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6695
6696 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6697 ioadl64->flags = cpu_to_be32(ioadl_flags);
6698 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6699 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6700
6701 last_ioadl64 = ioadl64;
6702 ioadl64++;
6703 }
6704
6705 if (likely(last_ioadl64))
6706 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6707 }
6708
6709 /**
6710 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6711 * @ipr_cmd: ipr command struct
6712 * @qc: ATA queued command
6713 *
6714 **/
6715 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6716 struct ata_queued_cmd *qc)
6717 {
6718 u32 ioadl_flags = 0;
6719 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6720 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6721 struct ipr_ioadl_desc *last_ioadl = NULL;
6722 int len = qc->nbytes;
6723 struct scatterlist *sg;
6724 unsigned int si;
6725
6726 if (len == 0)
6727 return;
6728
6729 if (qc->dma_dir == DMA_TO_DEVICE) {
6730 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6731 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6732 ioarcb->data_transfer_length = cpu_to_be32(len);
6733 ioarcb->ioadl_len =
6734 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6735 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6736 ioadl_flags = IPR_IOADL_FLAGS_READ;
6737 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6738 ioarcb->read_ioadl_len =
6739 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6740 }
6741
6742 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6743 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6744 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6745
6746 last_ioadl = ioadl;
6747 ioadl++;
6748 }
6749
6750 if (likely(last_ioadl))
6751 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6752 }
6753
6754 /**
6755 * ipr_qc_defer - Get a free ipr_cmd
6756 * @qc: queued command
6757 *
6758 * Return value:
6759  * 	0 if the qc can be issued / ATA_DEFER_LINK if it must be deferred
6760 **/
6761 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6762 {
6763 struct ata_port *ap = qc->ap;
6764 struct ipr_sata_port *sata_port = ap->private_data;
6765 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6766 struct ipr_cmnd *ipr_cmd;
6767 struct ipr_hrr_queue *hrrq;
6768 int hrrq_id;
6769
6770 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6771 hrrq = &ioa_cfg->hrrq[hrrq_id];
6772
6773 qc->lldd_task = NULL;
6774 spin_lock(&hrrq->_lock);
6775 if (unlikely(hrrq->ioa_is_dead)) {
6776 spin_unlock(&hrrq->_lock);
6777 return 0;
6778 }
6779
6780 if (unlikely(!hrrq->allow_cmds)) {
6781 spin_unlock(&hrrq->_lock);
6782 return ATA_DEFER_LINK;
6783 }
6784
6785 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6786 if (ipr_cmd == NULL) {
6787 spin_unlock(&hrrq->_lock);
6788 return ATA_DEFER_LINK;
6789 }
6790
6791 qc->lldd_task = ipr_cmd;
6792 spin_unlock(&hrrq->_lock);
6793 return 0;
6794 }
6795
6796 /**
6797 * ipr_qc_issue - Issue a SATA qc to a device
6798 * @qc: queued command
6799 *
6800 * Return value:
6801  * 	0 on success / AC_ERR_* on failure
6802 **/
6803 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6804 {
6805 struct ata_port *ap = qc->ap;
6806 struct ipr_sata_port *sata_port = ap->private_data;
6807 struct ipr_resource_entry *res = sata_port->res;
6808 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6809 struct ipr_cmnd *ipr_cmd;
6810 struct ipr_ioarcb *ioarcb;
6811 struct ipr_ioarcb_ata_regs *regs;
6812
6813 if (qc->lldd_task == NULL)
6814 ipr_qc_defer(qc);
6815
6816 ipr_cmd = qc->lldd_task;
6817 if (ipr_cmd == NULL)
6818 return AC_ERR_SYSTEM;
6819
6820 qc->lldd_task = NULL;
6821 spin_lock(&ipr_cmd->hrrq->_lock);
6822 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6823 ipr_cmd->hrrq->ioa_is_dead)) {
6824 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6825 spin_unlock(&ipr_cmd->hrrq->_lock);
6826 return AC_ERR_SYSTEM;
6827 }
6828
6829 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6830 ioarcb = &ipr_cmd->ioarcb;
6831
6832 if (ioa_cfg->sis64) {
6833 regs = &ipr_cmd->i.ata_ioadl.regs;
6834 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6835 } else
6836 regs = &ioarcb->u.add_data.u.regs;
6837
6838 memset(regs, 0, sizeof(*regs));
6839 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6840
6841 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6842 ipr_cmd->qc = qc;
6843 ipr_cmd->done = ipr_sata_done;
6844 ipr_cmd->ioarcb.res_handle = res->res_handle;
6845 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6846 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6847 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6848 ipr_cmd->dma_use_sg = qc->n_elem;
6849
6850 if (ioa_cfg->sis64)
6851 ipr_build_ata_ioadl64(ipr_cmd, qc);
6852 else
6853 ipr_build_ata_ioadl(ipr_cmd, qc);
6854
6855 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6856 ipr_copy_sata_tf(regs, &qc->tf);
6857 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6858 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6859
6860 switch (qc->tf.protocol) {
6861 case ATA_PROT_NODATA:
6862 case ATA_PROT_PIO:
6863 break;
6864
6865 case ATA_PROT_DMA:
6866 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6867 break;
6868
6869 case ATAPI_PROT_PIO:
6870 case ATAPI_PROT_NODATA:
6871 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6872 break;
6873
6874 case ATAPI_PROT_DMA:
6875 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6876 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6877 break;
6878
6879 default:
6880 WARN_ON(1);
6881 spin_unlock(&ipr_cmd->hrrq->_lock);
6882 return AC_ERR_INVALID;
6883 }
6884
6885 ipr_send_command(ipr_cmd);
6886 spin_unlock(&ipr_cmd->hrrq->_lock);
6887
6888 return 0;
6889 }
6890
6891 /**
6892 * ipr_qc_fill_rtf - Read result TF
6893 * @qc: ATA queued command
6894 *
6895 * Return value:
6896 * true
6897 **/
6898 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6899 {
6900 struct ipr_sata_port *sata_port = qc->ap->private_data;
6901 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6902 struct ata_taskfile *tf = &qc->result_tf;
6903
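	/* The adapter returns the device's ATA registers in the cached
	 * GATA IOASA; copy them into libata's result taskfile. */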
6904 tf->feature = g->error;
6905 tf->nsect = g->nsect;
6906 tf->lbal = g->lbal;
6907 tf->lbam = g->lbam;
6908 tf->lbah = g->lbah;
6909 tf->device = g->device;
6910 tf->command = g->status;
6911 tf->hob_nsect = g->hob_nsect;
6912 tf->hob_lbal = g->hob_lbal;
6913 tf->hob_lbam = g->hob_lbam;
6914 tf->hob_lbah = g->hob_lbah;
6915
6916 return true;
6917 }
6918
6919 static struct ata_port_operations ipr_sata_ops = {
6920 .phy_reset = ipr_ata_phy_reset,
6921 .hardreset = ipr_sata_reset,
6922 .post_internal_cmd = ipr_ata_post_internal,
6923 .qc_prep = ata_noop_qc_prep,
6924 .qc_defer = ipr_qc_defer,
6925 .qc_issue = ipr_qc_issue,
6926 .qc_fill_rtf = ipr_qc_fill_rtf,
6927 .port_start = ata_sas_port_start,
6928 .port_stop = ata_sas_port_stop
6929 };
6930
6931 static struct ata_port_info sata_port_info = {
6932 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
6933 ATA_FLAG_SAS_HOST,
6934 .pio_mask = ATA_PIO4_ONLY,
6935 .mwdma_mask = ATA_MWDMA2,
6936 .udma_mask = ATA_UDMA6,
6937 .port_ops = &ipr_sata_ops
6938 };
6939
6940 #ifdef CONFIG_PPC_PSERIES
6941 static const u16 ipr_blocked_processors[] = {
6942 PVR_NORTHSTAR,
6943 PVR_PULSAR,
6944 PVR_POWER4,
6945 PVR_ICESTAR,
6946 PVR_SSTAR,
6947 PVR_POWER4p,
6948 PVR_630,
6949 PVR_630p
6950 };
6951
6952 /**
6953 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6954 * @ioa_cfg: ioa cfg struct
6955 *
6956 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6957 * certain pSeries hardware. This function determines if the given
6958 * adapter is in one of these configurations or not.
6959 *
6960 * Return value:
6961 * 1 if adapter is not supported / 0 if adapter is supported
6962 **/
6963 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6964 {
6965 int i;
6966
6967 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6968 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6969 if (pvr_version_is(ipr_blocked_processors[i]))
6970 return 1;
6971 }
6972 }
6973 return 0;
6974 }
6975 #else
6976 #define ipr_invalid_adapter(ioa_cfg) 0
6977 #endif
6978
6979 /**
6980 * ipr_ioa_bringdown_done - IOA bring down completion.
6981 * @ipr_cmd: ipr command struct
6982 *
6983 * This function processes the completion of an adapter bring down.
6984 * It wakes any reset sleepers.
6985 *
6986 * Return value:
6987 * IPR_RC_JOB_RETURN
6988 **/
6989 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6990 {
6991 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6992 int i;
6993
6994 ENTER;
6995 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6996 ipr_trace;
6997 spin_unlock_irq(ioa_cfg->host->host_lock);
6998 scsi_unblock_requests(ioa_cfg->host);
6999 spin_lock_irq(ioa_cfg->host->host_lock);
7000 }
7001
7002 ioa_cfg->in_reset_reload = 0;
7003 ioa_cfg->reset_retries = 0;
7004 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7005 spin_lock(&ioa_cfg->hrrq[i]._lock);
7006 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7007 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7008 }
7009 wmb();
7010
7011 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7012 wake_up_all(&ioa_cfg->reset_wait_q);
7013 LEAVE;
7014
7015 return IPR_RC_JOB_RETURN;
7016 }
7017
7018 /**
7019 * ipr_ioa_reset_done - IOA reset completion.
7020 * @ipr_cmd: ipr command struct
7021 *
7022 * This function processes the completion of an adapter reset.
7023 * It schedules any necessary mid-layer add/removes and
7024 * wakes any reset sleepers.
7025 *
7026 * Return value:
7027 * IPR_RC_JOB_RETURN
7028 **/
7029 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7030 {
7031 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7032 struct ipr_resource_entry *res;
7033 struct ipr_hostrcb *hostrcb, *temp;
7034 int i = 0, j;
7035
7036 ENTER;
7037 ioa_cfg->in_reset_reload = 0;
7038 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7039 spin_lock(&ioa_cfg->hrrq[j]._lock);
7040 ioa_cfg->hrrq[j].allow_cmds = 1;
7041 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7042 }
7043 wmb();
7044 ioa_cfg->reset_cmd = NULL;
7045 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7046
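	/* Schedule mid-layer add/removes for any resources that changed
	 * during the reset, then hand the free host RCBs back to the
	 * adapter as log-data and config-change HCAMs. */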
7047 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7048 if (res->add_to_ml || res->del_from_ml) {
7049 ipr_trace;
7050 break;
7051 }
7052 }
7053 schedule_work(&ioa_cfg->work_q);
7054
7055 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
7056 list_del(&hostrcb->queue);
7057 if (i++ < IPR_NUM_LOG_HCAMS)
7058 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
7059 else
7060 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
7061 }
7062
7063 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7064 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7065
7066 ioa_cfg->reset_retries = 0;
7067 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7068 wake_up_all(&ioa_cfg->reset_wait_q);
7069
7070 spin_unlock(ioa_cfg->host->host_lock);
7071 scsi_unblock_requests(ioa_cfg->host);
7072 spin_lock(ioa_cfg->host->host_lock);
7073
7074 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7075 scsi_block_requests(ioa_cfg->host);
7076
7077 schedule_work(&ioa_cfg->work_q);
7078 LEAVE;
7079 return IPR_RC_JOB_RETURN;
7080 }
7081
7082 /**
7083 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7084 * @supported_dev: supported device struct
7085 * @vpids: vendor product id struct
7086 *
7087 * Return value:
7088 * none
7089 **/
7090 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7091 struct ipr_std_inq_vpids *vpids)
7092 {
7093 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7094 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7095 supported_dev->num_records = 1;
7096 supported_dev->data_length =
7097 cpu_to_be16(sizeof(struct ipr_supported_device));
7098 supported_dev->reserved = 0;
7099 }
7100
7101 /**
7102 * ipr_set_supported_devs - Send Set Supported Devices for a device
7103 * @ipr_cmd: ipr command struct
7104 *
7105 * This function sends a Set Supported Devices command to the adapter.
7106 *
7107 * Return value:
7108 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7109 **/
7110 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7111 {
7112 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7113 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7114 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7115 struct ipr_resource_entry *res = ipr_cmd->u.res;
7116
7117 ipr_cmd->job_step = ipr_ioa_reset_done;
7118
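	/* One Set Supported Devices command is sent per pass; on 32-bit
	 * SIS adapters the job step re-queues itself until every
	 * attached disk has been covered. */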
7119 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7120 if (!ipr_is_scsi_disk(res))
7121 continue;
7122
7123 ipr_cmd->u.res = res;
7124 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7125
7126 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7127 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7128 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7129
7130 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7131 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7132 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7133 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7134
7135 ipr_init_ioadl(ipr_cmd,
7136 ioa_cfg->vpd_cbs_dma +
7137 offsetof(struct ipr_misc_cbs, supp_dev),
7138 sizeof(struct ipr_supported_device),
7139 IPR_IOADL_FLAGS_WRITE_LAST);
7140
7141 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7142 IPR_SET_SUP_DEVICE_TIMEOUT);
7143
7144 if (!ioa_cfg->sis64)
7145 ipr_cmd->job_step = ipr_set_supported_devs;
7146 LEAVE;
7147 return IPR_RC_JOB_RETURN;
7148 }
7149
7150 LEAVE;
7151 return IPR_RC_JOB_CONTINUE;
7152 }
7153
7154 /**
7155 * ipr_get_mode_page - Locate specified mode page
7156 * @mode_pages: mode page buffer
7157 * @page_code: page code to find
7158 * @len: minimum required length for mode page
7159 *
7160 * Return value:
7161 * pointer to mode page / NULL on failure
7162 **/
7163 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7164 u32 page_code, u32 len)
7165 {
7166 struct ipr_mode_page_hdr *mode_hdr;
7167 u32 page_length;
7168 u32 length;
7169
7170 if (!mode_pages || (mode_pages->hdr.length == 0))
7171 return NULL;
7172
7173 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7174 mode_hdr = (struct ipr_mode_page_hdr *)
7175 (mode_pages->data + mode_pages->hdr.block_desc_len);
7176
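	/* Walk the mode page headers that follow the block descriptors
	 * until the requested page code is found with at least the
	 * required length. */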
7177 while (length) {
7178 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7179 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7180 return mode_hdr;
7181 break;
7182 } else {
7183 page_length = (sizeof(struct ipr_mode_page_hdr) +
7184 mode_hdr->page_length);
7185 length -= page_length;
7186 mode_hdr = (struct ipr_mode_page_hdr *)
7187 ((unsigned long)mode_hdr + page_length);
7188 }
7189 }
7190 return NULL;
7191 }
7192
7193 /**
7194 * ipr_check_term_power - Check for term power errors
7195 * @ioa_cfg: ioa config struct
7196 * @mode_pages: IOAFP mode pages buffer
7197 *
7198 * Check the IOAFP's mode page 28 for term power errors
7199 *
7200 * Return value:
7201 * nothing
7202 **/
7203 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7204 struct ipr_mode_pages *mode_pages)
7205 {
7206 int i;
7207 int entry_length;
7208 struct ipr_dev_bus_entry *bus;
7209 struct ipr_mode_page28 *mode_page;
7210
7211 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7212 sizeof(struct ipr_mode_page28));
7213
7214 entry_length = mode_page->entry_length;
7215
7216 bus = mode_page->bus;
7217
7218 for (i = 0; i < mode_page->num_entries; i++) {
7219 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7220 dev_err(&ioa_cfg->pdev->dev,
7221 "Term power is absent on scsi bus %d\n",
7222 bus->res_addr.bus);
7223 }
7224
7225 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7226 }
7227 }
7228
7229 /**
7230 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7231 * @ioa_cfg: ioa config struct
7232 *
7233 * Looks through the config table checking for SES devices. If
7234 * the SES device is in the SES table indicating a maximum SCSI
7235 * bus speed, the speed is limited for the bus.
7236 *
7237 * Return value:
7238 * none
7239 **/
7240 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7241 {
7242 u32 max_xfer_rate;
7243 int i;
7244
7245 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7246 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7247 ioa_cfg->bus_attr[i].bus_width);
7248
7249 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7250 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7251 }
7252 }
7253
7254 /**
7255 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7256 * @ioa_cfg: ioa config struct
7257 * @mode_pages: mode page 28 buffer
7258 *
7259 * Updates mode page 28 based on driver configuration
7260 *
7261 * Return value:
7262 * none
7263 **/
7264 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7265 struct ipr_mode_pages *mode_pages)
7266 {
7267 int i, entry_length;
7268 struct ipr_dev_bus_entry *bus;
7269 struct ipr_bus_attributes *bus_attr;
7270 struct ipr_mode_page28 *mode_page;
7271
7272 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7273 sizeof(struct ipr_mode_page28));
7274
7275 entry_length = mode_page->entry_length;
7276
7277 /* Loop for each device bus entry */
7278 for (i = 0, bus = mode_page->bus;
7279 i < mode_page->num_entries;
7280 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7281 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7282 dev_err(&ioa_cfg->pdev->dev,
7283 "Invalid resource address reported: 0x%08X\n",
7284 IPR_GET_PHYS_LOC(bus->res_addr));
7285 continue;
7286 }
7287
7288 bus_attr = &ioa_cfg->bus_attr[i];
7289 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7290 bus->bus_width = bus_attr->bus_width;
7291 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7292 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7293 if (bus_attr->qas_enabled)
7294 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7295 else
7296 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7297 }
7298 }
7299
7300 /**
7301 * ipr_build_mode_select - Build a mode select command
7302 * @ipr_cmd: ipr command struct
7303 * @res_handle: resource handle to send command to
7304 * @parm: Byte 2 of Mode Sense command
7305 * @dma_addr: DMA buffer address
7306 * @xfer_len: data transfer length
7307 *
7308 * Return value:
7309 * none
7310 **/
7311 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7312 __be32 res_handle, u8 parm,
7313 dma_addr_t dma_addr, u8 xfer_len)
7314 {
7315 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7316
7317 ioarcb->res_handle = res_handle;
7318 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7319 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7320 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7321 ioarcb->cmd_pkt.cdb[1] = parm;
7322 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7323
7324 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7325 }
7326
7327 /**
7328 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7329 * @ipr_cmd: ipr command struct
7330 *
7331 * This function sets up the SCSI bus attributes and sends
7332 * a Mode Select for Page 28 to activate them.
7333 *
7334 * Return value:
7335 * IPR_RC_JOB_RETURN
7336 **/
7337 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7338 {
7339 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7340 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7341 int length;
7342
7343 ENTER;
7344 ipr_scsi_bus_speed_limit(ioa_cfg);
7345 ipr_check_term_power(ioa_cfg, mode_pages);
7346 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
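	/* The mode data length field is reserved for MODE SELECT, so
	 * remember the transfer length and clear it in the header. */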
7347 length = mode_pages->hdr.length + 1;
7348 mode_pages->hdr.length = 0;
7349
7350 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7351 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7352 length);
7353
7354 ipr_cmd->job_step = ipr_set_supported_devs;
7355 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7356 struct ipr_resource_entry, queue);
7357 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7358
7359 LEAVE;
7360 return IPR_RC_JOB_RETURN;
7361 }
7362
7363 /**
7364 * ipr_build_mode_sense - Builds a mode sense command
7365 * @ipr_cmd: ipr command struct
7366 * @res_handle: resource handle to send command to
7367 * @parm: Byte 2 of mode sense command
7368 * @dma_addr: DMA address of mode sense buffer
7369 * @xfer_len: Size of DMA buffer
7370 *
7371 * Return value:
7372 * none
7373 **/
7374 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7375 __be32 res_handle,
7376 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7377 {
7378 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7379
7380 ioarcb->res_handle = res_handle;
7381 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7382 ioarcb->cmd_pkt.cdb[2] = parm;
7383 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7384 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7385
7386 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7387 }
7388
7389 /**
7390 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7391 * @ipr_cmd: ipr command struct
7392 *
7393 * This function handles the failure of an IOA bringup command.
7394 *
7395 * Return value:
7396 * IPR_RC_JOB_RETURN
7397 **/
7398 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7399 {
7400 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7401 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7402
7403 dev_err(&ioa_cfg->pdev->dev,
7404 "0x%02X failed with IOASC: 0x%08X\n",
7405 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7406
7407 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7408 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7409 return IPR_RC_JOB_RETURN;
7410 }
7411
7412 /**
7413 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7414 * @ipr_cmd: ipr command struct
7415 *
7416 * This function handles the failure of a Mode Sense to the IOAFP.
7417 * Some adapters do not handle all mode pages.
7418 *
7419 * Return value:
7420 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7421 **/
7422 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7423 {
7424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7425 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7426
7427 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7428 ipr_cmd->job_step = ipr_set_supported_devs;
7429 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7430 struct ipr_resource_entry, queue);
7431 return IPR_RC_JOB_CONTINUE;
7432 }
7433
7434 return ipr_reset_cmd_failed(ipr_cmd);
7435 }
7436
7437 /**
7438 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7439 * @ipr_cmd: ipr command struct
7440 *
7441 * This function sends a Page 28 mode sense to the IOA to
7442 * retrieve SCSI bus attributes.
7443 *
7444 * Return value:
7445 * IPR_RC_JOB_RETURN
7446 **/
7447 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7448 {
7449 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7450
7451 ENTER;
7452 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7453 0x28, ioa_cfg->vpd_cbs_dma +
7454 offsetof(struct ipr_misc_cbs, mode_pages),
7455 sizeof(struct ipr_mode_pages));
7456
7457 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7458 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7459
7460 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7461
7462 LEAVE;
7463 return IPR_RC_JOB_RETURN;
7464 }
7465
7466 /**
7467 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7468 * @ipr_cmd: ipr command struct
7469 *
7470 * This function enables dual IOA RAID support if possible.
7471 *
7472 * Return value:
7473 * IPR_RC_JOB_RETURN
7474 **/
7475 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7476 {
7477 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7478 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7479 struct ipr_mode_page24 *mode_page;
7480 int length;
7481
7482 ENTER;
7483 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7484 sizeof(struct ipr_mode_page24));
7485
7486 if (mode_page)
7487 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7488
7489 length = mode_pages->hdr.length + 1;
7490 mode_pages->hdr.length = 0;
7491
7492 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7493 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7494 length);
7495
7496 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7497 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7498
7499 LEAVE;
7500 return IPR_RC_JOB_RETURN;
7501 }
7502
7503 /**
7504 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7505 * @ipr_cmd: ipr command struct
7506 *
7507 * This function handles the failure of a Mode Sense to the IOAFP.
7508 * Some adapters do not handle all mode pages.
7509 *
7510 * Return value:
7511 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7512 **/
7513 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7514 {
7515 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7516
7517 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7518 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7519 return IPR_RC_JOB_CONTINUE;
7520 }
7521
7522 return ipr_reset_cmd_failed(ipr_cmd);
7523 }
7524
7525 /**
7526 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7527 * @ipr_cmd: ipr command struct
7528 *
7529 * This function sends a mode sense to the IOA to retrieve
7530 * the IOA Advanced Function Control mode page.
7531 *
7532 * Return value:
7533 * IPR_RC_JOB_RETURN
7534 **/
7535 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7536 {
7537 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7538
7539 ENTER;
7540 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7541 0x24, ioa_cfg->vpd_cbs_dma +
7542 offsetof(struct ipr_misc_cbs, mode_pages),
7543 sizeof(struct ipr_mode_pages));
7544
7545 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7546 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7547
7548 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7549
7550 LEAVE;
7551 return IPR_RC_JOB_RETURN;
7552 }
7553
7554 /**
7555 * ipr_init_res_table - Initialize the resource table
7556 * @ipr_cmd: ipr command struct
7557 *
7558 * This function looks through the existing resource table, comparing
7559 * it with the config table. This function will take care of old/new
7560 * devices and schedule adding/removing them from the mid-layer
7561 * as appropriate.
7562 *
7563 * Return value:
7564 * IPR_RC_JOB_CONTINUE
7565 **/
7566 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7567 {
7568 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7569 struct ipr_resource_entry *res, *temp;
7570 struct ipr_config_table_entry_wrapper cfgtew;
7571 int entries, found, flag, i;
7572 LIST_HEAD(old_res);
7573
7574 ENTER;
7575 if (ioa_cfg->sis64)
7576 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7577 else
7578 flag = ioa_cfg->u.cfg_table->hdr.flags;
7579
7580 if (flag & IPR_UCODE_DOWNLOAD_REQ)
7581 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7582
7583 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7584 list_move_tail(&res->queue, &old_res);
7585
7586 if (ioa_cfg->sis64)
7587 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7588 else
7589 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7590
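	/* Match each config table entry against the resources known
	 * before the reset; anything still on old_res afterwards has
	 * gone away and is either removed from the mid-layer or freed. */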
7591 for (i = 0; i < entries; i++) {
7592 if (ioa_cfg->sis64)
7593 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7594 else
7595 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7596 found = 0;
7597
7598 list_for_each_entry_safe(res, temp, &old_res, queue) {
7599 if (ipr_is_same_device(res, &cfgtew)) {
7600 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7601 found = 1;
7602 break;
7603 }
7604 }
7605
7606 if (!found) {
7607 if (list_empty(&ioa_cfg->free_res_q)) {
7608 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7609 break;
7610 }
7611
7612 found = 1;
7613 res = list_entry(ioa_cfg->free_res_q.next,
7614 struct ipr_resource_entry, queue);
7615 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7616 ipr_init_res_entry(res, &cfgtew);
7617 res->add_to_ml = 1;
7618 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7619 res->sdev->allow_restart = 1;
7620
7621 if (found)
7622 ipr_update_res_entry(res, &cfgtew);
7623 }
7624
7625 list_for_each_entry_safe(res, temp, &old_res, queue) {
7626 if (res->sdev) {
7627 res->del_from_ml = 1;
7628 res->res_handle = IPR_INVALID_RES_HANDLE;
7629 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7630 }
7631 }
7632
7633 list_for_each_entry_safe(res, temp, &old_res, queue) {
7634 ipr_clear_res_target(res);
7635 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7636 }
7637
7638 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7639 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7640 else
7641 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7642
7643 LEAVE;
7644 return IPR_RC_JOB_CONTINUE;
7645 }
7646
7647 /**
7648 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7649 * @ipr_cmd: ipr command struct
7650 *
7651 * This function sends a Query IOA Configuration command
7652 * to the adapter to retrieve the IOA configuration table.
7653 *
7654 * Return value:
7655 * IPR_RC_JOB_RETURN
7656 **/
7657 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7658 {
7659 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7660 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7661 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7662 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7663
7664 ENTER;
7665 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7666 ioa_cfg->dual_raid = 1;
7667 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7668 ucode_vpd->major_release, ucode_vpd->card_type,
7669 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7670 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7671 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7672
7673 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7674 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7675 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7676 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7677
7678 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7679 IPR_IOADL_FLAGS_READ_LAST);
7680
7681 ipr_cmd->job_step = ipr_init_res_table;
7682
7683 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7684
7685 LEAVE;
7686 return IPR_RC_JOB_RETURN;
7687 }
7688
7689 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7690 {
7691 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7692
7693 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7694 return IPR_RC_JOB_CONTINUE;
7695
7696 return ipr_reset_cmd_failed(ipr_cmd);
7697 }
7698
7699 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7700 __be32 res_handle, u8 sa_code)
7701 {
7702 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7703
7704 ioarcb->res_handle = res_handle;
7705 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7706 ioarcb->cmd_pkt.cdb[1] = sa_code;
7707 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7708 }
7709
7710 /**
7711 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7712 * action
7713 * @ipr_cmd: ipr command struct
7714 * Return value:
7715 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7716 **/
7717 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7718 {
7719 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7720 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7721 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7722
7723 ENTER;
7724
7725 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7726
7727 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7728 ipr_build_ioa_service_action(ipr_cmd,
7729 cpu_to_be32(IPR_IOA_RES_HANDLE),
7730 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7731
7732 ioarcb->cmd_pkt.cdb[2] = 0x40;
7733
7734 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7735 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7736 IPR_SET_SUP_DEVICE_TIMEOUT);
7737
7738 LEAVE;
7739 return IPR_RC_JOB_RETURN;
7740 }
7741
7742 LEAVE;
7743 return IPR_RC_JOB_CONTINUE;
7744 }
7745
7746 /**
7747 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7748 * @ipr_cmd: ipr command struct
7749 *
7750 * @flags: byte 1 of the inquiry CDB
 * @page: inquiry page code
 * @dma_addr: DMA address of the inquiry buffer
 * @xfer_len: size of the inquiry buffer
 *
7751 *
7752 * Return value:
7753 * none
7754 **/
7755 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7756 dma_addr_t dma_addr, u8 xfer_len)
7757 {
7758 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7759
7760 ENTER;
7761 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7762 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7763
7764 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7765 ioarcb->cmd_pkt.cdb[1] = flags;
7766 ioarcb->cmd_pkt.cdb[2] = page;
7767 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7768
7769 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7770
7771 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7772 LEAVE;
7773 }
7774
7775 /**
7776 * ipr_inquiry_page_supported - Is the given inquiry page supported
7777 * @page0: inquiry page 0 buffer
7778 * @page: page code.
7779 *
7780 * This function determines if the specified inquiry page is supported.
7781 *
7782 * Return value:
7783 * 1 if page is supported / 0 if not
7784 **/
7785 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7786 {
7787 int i;
7788
7789 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7790 if (page0->page[i] == page)
7791 return 1;
7792
7793 return 0;
7794 }
7795
7796 /**
7797 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7798 * @ipr_cmd: ipr command struct
7799 *
7800 * This function sends a Page 0xC4 inquiry to the adapter
7801 * to retrieve software VPD information.
7802 *
7803 * Return value:
7804 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7805 **/
7806 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
7807 {
7808 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7809 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7810 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7811
7812 ENTER;
7813 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
7814 memset(pageC4, 0, sizeof(*pageC4));
7815
7816 if (ipr_inquiry_page_supported(page0, 0xC4)) {
7817 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
7818 (ioa_cfg->vpd_cbs_dma
7819 + offsetof(struct ipr_misc_cbs,
7820 pageC4_data)),
7821 sizeof(struct ipr_inquiry_pageC4));
7822 return IPR_RC_JOB_RETURN;
7823 }
7824
7825 LEAVE;
7826 return IPR_RC_JOB_CONTINUE;
7827 }
7828
7829 /**
7830 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7831 * @ipr_cmd: ipr command struct
7832 *
7833 * This function sends a Page 0xD0 inquiry to the adapter
7834 * to retrieve adapter capabilities.
7835 *
7836 * Return value:
7837 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7838 **/
7839 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7840 {
7841 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7842 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7843 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7844
7845 ENTER;
7846 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
7847 memset(cap, 0, sizeof(*cap));
7848
7849 if (ipr_inquiry_page_supported(page0, 0xD0)) {
7850 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7851 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7852 sizeof(struct ipr_inquiry_cap));
7853 return IPR_RC_JOB_RETURN;
7854 }
7855
7856 LEAVE;
7857 return IPR_RC_JOB_CONTINUE;
7858 }
7859
7860 /**
7861 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7862 * @ipr_cmd: ipr command struct
7863 *
7864 * This function sends a Page 3 inquiry to the adapter
7865 * to retrieve software VPD information.
7866 *
7867 * Return value:
7868 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7869 **/
7870 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7871 {
7872 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7873
7874 ENTER;
7875
7876 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7877
7878 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7879 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7880 sizeof(struct ipr_inquiry_page3));
7881
7882 LEAVE;
7883 return IPR_RC_JOB_RETURN;
7884 }
7885
7886 /**
7887 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7888 * @ipr_cmd: ipr command struct
7889 *
7890 * This function sends a Page 0 inquiry to the adapter
7891 * to retrieve supported inquiry pages.
7892 *
7893 * Return value:
7894 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7895 **/
7896 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7897 {
7898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7899 char type[5];
7900
7901 ENTER;
7902
7903 /* Grab the type out of the VPD and store it away */
7904 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7905 type[4] = '\0';
7906 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7907
7908 if (ipr_invalid_adapter(ioa_cfg)) {
7909 dev_err(&ioa_cfg->pdev->dev,
7910 "Adapter not supported in this hardware configuration.\n");
7911
7912 if (!ipr_testmode) {
7913 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7914 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7915 list_add_tail(&ipr_cmd->queue,
7916 &ioa_cfg->hrrq->hrrq_free_q);
7917 return IPR_RC_JOB_RETURN;
7918 }
7919 }
7920
7921 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7922
7923 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7924 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7925 sizeof(struct ipr_inquiry_page0));
7926
7927 LEAVE;
7928 return IPR_RC_JOB_RETURN;
7929 }
7930
7931 /**
7932 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7933 * @ipr_cmd: ipr command struct
7934 *
7935 * This function sends a standard inquiry to the adapter.
7936 *
7937 * Return value:
7938 * IPR_RC_JOB_RETURN
7939 **/
7940 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7941 {
7942 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7943
7944 ENTER;
7945 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7946
7947 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7948 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7949 sizeof(struct ipr_ioa_vpd));
7950
7951 LEAVE;
7952 return IPR_RC_JOB_RETURN;
7953 }
7954
7955 /**
7956 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7957 * @ipr_cmd: ipr command struct
7958 *
7959 * This function sends an Identify Host Request Response Queue
7960 * command to establish the HRRQ with the adapter.
7961 *
7962 * Return value:
7963 * IPR_RC_JOB_RETURN
7964 **/
7965 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7966 {
7967 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7968 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7969 struct ipr_hrr_queue *hrrq;
7970
7971 ENTER;
7972 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7973 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7974
7975 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7976 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7977
7978 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7979 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7980
7981 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7982 if (ioa_cfg->sis64)
7983 ioarcb->cmd_pkt.cdb[1] = 0x1;
7984
7985 if (ioa_cfg->nvectors == 1)
7986 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7987 else
7988 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7989
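		/* CDB bytes 2-5 carry the low 32 bits of the HRRQ DMA
		 * address and bytes 7-8 its size in bytes; on SIS-64
		 * bytes 10-13 carry the upper 32 bits, and bytes 9/14
		 * select the HRRQ index when multiple queues are enabled. */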
7990 ioarcb->cmd_pkt.cdb[2] =
7991 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7992 ioarcb->cmd_pkt.cdb[3] =
7993 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7994 ioarcb->cmd_pkt.cdb[4] =
7995 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7996 ioarcb->cmd_pkt.cdb[5] =
7997 ((u64) hrrq->host_rrq_dma) & 0xff;
7998 ioarcb->cmd_pkt.cdb[7] =
7999 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8000 ioarcb->cmd_pkt.cdb[8] =
8001 (sizeof(u32) * hrrq->size) & 0xff;
8002
8003 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8004 ioarcb->cmd_pkt.cdb[9] =
8005 ioa_cfg->identify_hrrq_index;
8006
8007 if (ioa_cfg->sis64) {
8008 ioarcb->cmd_pkt.cdb[10] =
8009 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8010 ioarcb->cmd_pkt.cdb[11] =
8011 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8012 ioarcb->cmd_pkt.cdb[12] =
8013 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8014 ioarcb->cmd_pkt.cdb[13] =
8015 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8016 }
8017
8018 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
8019 ioarcb->cmd_pkt.cdb[14] =
8020 ioa_cfg->identify_hrrq_index;
8021
8022 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8023 IPR_INTERNAL_TIMEOUT);
8024
8025 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8026 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8027
8028 LEAVE;
8029 return IPR_RC_JOB_RETURN;
8030 }
8031
8032 LEAVE;
8033 return IPR_RC_JOB_CONTINUE;
8034 }
8035
8036 /**
8037 * ipr_reset_timer_done - Adapter reset timer function
8038 * @ipr_cmd: ipr command struct
8039 *
8040 * Description: This function is used in adapter reset processing
8041 * for timing events. If the reset_cmd pointer in the IOA
8042 * config struct does not point to this command, we are doing nested
8043 * resets and fail_all_ops will take care of freeing the
8044 * command block.
8045 *
8046 * Return value:
8047 * none
8048 **/
8049 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
8050 {
8051 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8052 unsigned long lock_flags = 0;
8053
8054 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8055
8056 if (ioa_cfg->reset_cmd == ipr_cmd) {
8057 list_del(&ipr_cmd->queue);
8058 ipr_cmd->done(ipr_cmd);
8059 }
8060
8061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8062 }
8063
8064 /**
8065 * ipr_reset_start_timer - Start a timer for adapter reset job
8066 * @ipr_cmd: ipr command struct
8067 * @timeout: timeout value
8068 *
8069 * Description: This function is used in adapter reset processing
8070 * for timing events. If the reset_cmd pointer in the IOA
8071 * config struct does not point to this command, we are doing nested
8072 * resets and fail_all_ops will take care of freeing the
8073 * command block.
8074 *
8075 * Return value:
8076 * none
8077 **/
8078 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8079 unsigned long timeout)
8080 {
8081
8082 ENTER;
8083 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8084 ipr_cmd->done = ipr_reset_ioa_job;
8085
8086 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8087 ipr_cmd->timer.expires = jiffies + timeout;
8088 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
8089 add_timer(&ipr_cmd->timer);
8090 }
8091
8092 /**
8093 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8094 * @ioa_cfg: ioa cfg struct
8095 *
8096 * Return value:
8097 * nothing
8098 **/
8099 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8100 {
8101 struct ipr_hrr_queue *hrrq;
8102
8103 for_each_hrrq(hrrq, ioa_cfg) {
8104 spin_lock(&hrrq->_lock);
8105 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8106
8107 /* Initialize Host RRQ pointers */
8108 hrrq->hrrq_start = hrrq->host_rrq;
8109 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8110 hrrq->hrrq_curr = hrrq->hrrq_start;
8111 hrrq->toggle_bit = 1;
8112 spin_unlock(&hrrq->_lock);
8113 }
8114 wmb();
8115
8116 ioa_cfg->identify_hrrq_index = 0;
8117 if (ioa_cfg->hrrq_num == 1)
8118 atomic_set(&ioa_cfg->hrrq_index, 0);
8119 else
8120 atomic_set(&ioa_cfg->hrrq_index, 1);
8121
8122 /* Zero out config table */
8123 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8124 }
8125
8126 /**
8127 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8128 * @ipr_cmd: ipr command struct
8129 *
8130 * Return value:
8131 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8132 **/
8133 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8134 {
8135 unsigned long stage, stage_time;
8136 u32 feedback;
8137 volatile u32 int_reg;
8138 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8139 u64 maskval = 0;
8140
8141 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8142 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8143 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8144
8145 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8146
8147 /* sanity check the stage_time value */
8148 if (stage_time == 0)
8149 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8150 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8151 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8152 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8153 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8154
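	/* Once the adapter signals the transition to operational, jump
	 * straight to identifying the host RRQs; otherwise (re)arm the
	 * per-stage timer and wait for the next stage change. */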
8155 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8156 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8157 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8158 stage_time = ioa_cfg->transop_timeout;
8159 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8160 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8161 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8162 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8163 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8164 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8165 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8166 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8167 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8168 return IPR_RC_JOB_CONTINUE;
8169 }
8170 }
8171
8172 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8173 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8174 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8175 ipr_cmd->done = ipr_reset_ioa_job;
8176 add_timer(&ipr_cmd->timer);
8177
8178 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8179
8180 return IPR_RC_JOB_RETURN;
8181 }
8182
8183 /**
8184 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8185 * @ipr_cmd: ipr command struct
8186 *
8187 * This function reinitializes some control blocks and
8188 * enables destructive diagnostics on the adapter.
8189 *
8190 * Return value:
8191 * IPR_RC_JOB_RETURN
8192 **/
8193 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8194 {
8195 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8196 volatile u32 int_reg;
8197 volatile u64 maskval;
8198 int i;
8199
8200 ENTER;
8201 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8202 ipr_init_ioa_mem(ioa_cfg);
8203
8204 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8205 spin_lock(&ioa_cfg->hrrq[i]._lock);
8206 ioa_cfg->hrrq[i].allow_interrupts = 1;
8207 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8208 }
8209 wmb();
8210 if (ioa_cfg->sis64) {
8211 /* Set the adapter to the correct endian mode. */
8212 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8213 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8214 }
8215
8216 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8217
8218 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8219 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8220 ioa_cfg->regs.clr_interrupt_mask_reg32);
8221 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8222 return IPR_RC_JOB_CONTINUE;
8223 }
8224
8225 /* Enable destructive diagnostics on IOA */
8226 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8227
8228 if (ioa_cfg->sis64) {
8229 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8230 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8231 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8232 } else
8233 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8234
8235 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8236
8237 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8238
8239 if (ioa_cfg->sis64) {
8240 ipr_cmd->job_step = ipr_reset_next_stage;
8241 return IPR_RC_JOB_CONTINUE;
8242 }
8243
8244 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8245 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8246 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8247 ipr_cmd->done = ipr_reset_ioa_job;
8248 add_timer(&ipr_cmd->timer);
8249 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8250
8251 LEAVE;
8252 return IPR_RC_JOB_RETURN;
8253 }
8254
8255 /**
8256 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8257 * @ipr_cmd: ipr command struct
8258 *
8259 * This function is invoked when an adapter dump has run out
8260 * of processing time.
8261 *
8262 * Return value:
8263 * IPR_RC_JOB_CONTINUE
8264 **/
8265 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8266 {
8267 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8268
8269 if (ioa_cfg->sdt_state == GET_DUMP)
8270 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8271 else if (ioa_cfg->sdt_state == READ_DUMP)
8272 ioa_cfg->sdt_state = ABORT_DUMP;
8273
8274 ioa_cfg->dump_timeout = 1;
8275 ipr_cmd->job_step = ipr_reset_alert;
8276
8277 return IPR_RC_JOB_CONTINUE;
8278 }
8279
8280 /**
8281 * ipr_unit_check_no_data - Log a unit check/no data error log
8282 * @ioa_cfg: ioa config struct
8283 *
8284 * Logs an error indicating the adapter unit checked, but for some
8285 * reason, we were unable to fetch the unit check buffer.
8286 *
8287 * Return value:
8288 * nothing
8289 **/
8290 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8291 {
8292 ioa_cfg->errors_logged++;
8293 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8294 }
8295
8296 /**
8297 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8298 * @ioa_cfg: ioa config struct
8299 *
8300 * Fetches the unit check buffer from the adapter by clocking the data
8301 * through the mailbox register.
8302 *
8303 * Return value:
8304 * nothing
8305 **/
8306 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8307 {
8308 unsigned long mailbox;
8309 struct ipr_hostrcb *hostrcb;
8310 struct ipr_uc_sdt sdt;
8311 int rc, length;
8312 u32 ioasc;
8313
8314 mailbox = readl(ioa_cfg->ioa_mailbox);
8315
8316 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8317 ipr_unit_check_no_data(ioa_cfg);
8318 return;
8319 }
8320
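	/* The mailbox register points at a smart dump table; its first
	 * entry describes where the unit check buffer lives. */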
8321 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8322 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8323 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8324
8325 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8326 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8327 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8328 ipr_unit_check_no_data(ioa_cfg);
8329 return;
8330 }
8331
8332 /* Find length of the first sdt entry (UC buffer) */
8333 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8334 length = be32_to_cpu(sdt.entry[0].end_token);
8335 else
8336 length = (be32_to_cpu(sdt.entry[0].end_token) -
8337 be32_to_cpu(sdt.entry[0].start_token)) &
8338 IPR_FMT2_MBX_ADDR_MASK;
8339
8340 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8341 struct ipr_hostrcb, queue);
8342 list_del(&hostrcb->queue);
8343 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8344
8345 rc = ipr_get_ldump_data_section(ioa_cfg,
8346 be32_to_cpu(sdt.entry[0].start_token),
8347 (__be32 *)&hostrcb->hcam,
8348 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8349
8350 if (!rc) {
8351 ipr_handle_log_data(ioa_cfg, hostrcb);
8352 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8353 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8354 ioa_cfg->sdt_state == GET_DUMP)
8355 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8356 } else
8357 ipr_unit_check_no_data(ioa_cfg);
8358
8359 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8360 }
8361
8362 /**
8363 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8364 * @ipr_cmd: ipr command struct
8365 *
8366 * Description: This function will call to get the unit check buffer.
8367 *
8368 * Return value:
8369 * IPR_RC_JOB_RETURN
8370 **/
8371 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8372 {
8373 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8374
8375 ENTER;
8376 ioa_cfg->ioa_unit_checked = 0;
8377 ipr_get_unit_check_buffer(ioa_cfg);
8378 ipr_cmd->job_step = ipr_reset_alert;
8379 ipr_reset_start_timer(ipr_cmd, 0);
8380
8381 LEAVE;
8382 return IPR_RC_JOB_RETURN;
8383 }
8384
8385 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8386 {
8387 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8388
8389 ENTER;
8390
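	/* On SIS-64, poll until the mailbox-stable bit is set (or the
	 * wait time is exhausted) before kicking off the dump read. */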
8391 if (ioa_cfg->sdt_state != GET_DUMP)
8392 return IPR_RC_JOB_RETURN;
8393
8394 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8395 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8396 IPR_PCII_MAILBOX_STABLE)) {
8397
8398 if (!ipr_cmd->u.time_left)
8399 dev_err(&ioa_cfg->pdev->dev,
8400 "Timed out waiting for Mailbox register.\n");
8401
8402 ioa_cfg->sdt_state = READ_DUMP;
8403 ioa_cfg->dump_timeout = 0;
8404 if (ioa_cfg->sis64)
8405 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8406 else
8407 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8408 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8409 schedule_work(&ioa_cfg->work_q);
8410
8411 } else {
8412 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8413 ipr_reset_start_timer(ipr_cmd,
8414 IPR_CHECK_FOR_RESET_TIMEOUT);
8415 }
8416
8417 LEAVE;
8418 return IPR_RC_JOB_RETURN;
8419 }
8420
8421 /**
8422 * ipr_reset_restore_cfg_space - Restore PCI config space.
8423 * @ipr_cmd: ipr command struct
8424 *
8425 * Description: This function restores the saved PCI config space of
8426 * the adapter, fails all outstanding ops back to the callers, and
8427 * fetches the dump/unit check if applicable to this reset.
8428 *
8429 * Return value:
8430 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8431 **/
8432 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8433 {
8434 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8435 u32 int_reg;
8436
8437 ENTER;
8438 ioa_cfg->pdev->state_saved = true;
8439 pci_restore_state(ioa_cfg->pdev);
8440
8441 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8442 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8443 return IPR_RC_JOB_CONTINUE;
8444 }
8445
8446 ipr_fail_all_ops(ioa_cfg);
8447
8448 if (ioa_cfg->sis64) {
8449 /* Set the adapter to the correct endian mode. */
8450 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8451 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8452 }
8453
8454 if (ioa_cfg->ioa_unit_checked) {
8455 if (ioa_cfg->sis64) {
8456 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8457 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8458 return IPR_RC_JOB_RETURN;
8459 } else {
8460 ioa_cfg->ioa_unit_checked = 0;
8461 ipr_get_unit_check_buffer(ioa_cfg);
8462 ipr_cmd->job_step = ipr_reset_alert;
8463 ipr_reset_start_timer(ipr_cmd, 0);
8464 return IPR_RC_JOB_RETURN;
8465 }
8466 }
8467
8468 if (ioa_cfg->in_ioa_bringdown) {
8469 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8470 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8471 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8472 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8473 } else {
8474 ipr_cmd->job_step = ipr_reset_enable_ioa;
8475 }
8476
8477 LEAVE;
8478 return IPR_RC_JOB_CONTINUE;
8479 }
8480
8481 /**
8482 * ipr_reset_bist_done - BIST has completed on the adapter.
8483 * @ipr_cmd: ipr command struct
8484 *
8485 * Description: Unblock config space and resume the reset process.
8486 *
8487 * Return value:
8488 * IPR_RC_JOB_CONTINUE
8489 **/
8490 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8491 {
8492 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8493
8494 ENTER;
8495 if (ioa_cfg->cfg_locked)
8496 pci_cfg_access_unlock(ioa_cfg->pdev);
8497 ioa_cfg->cfg_locked = 0;
8498 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8499 LEAVE;
8500 return IPR_RC_JOB_CONTINUE;
8501 }
8502
8503 /**
8504 * ipr_reset_start_bist - Run BIST on the adapter.
8505 * @ipr_cmd: ipr command struct
8506 *
8507 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8508 *
8509 * Return value:
8510 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8511 **/
8512 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8513 {
8514 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8515 int rc = PCIBIOS_SUCCESSFUL;
8516
8517 ENTER;
8518 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8519 writel(IPR_UPROCI_SIS64_START_BIST,
8520 ioa_cfg->regs.set_uproc_interrupt_reg32);
8521 else
8522 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8523
8524 if (rc == PCIBIOS_SUCCESSFUL) {
8525 ipr_cmd->job_step = ipr_reset_bist_done;
8526 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8527 rc = IPR_RC_JOB_RETURN;
8528 } else {
8529 if (ioa_cfg->cfg_locked)
8530 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8531 ioa_cfg->cfg_locked = 0;
8532 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8533 rc = IPR_RC_JOB_CONTINUE;
8534 }
8535
8536 LEAVE;
8537 return rc;
8538 }
8539
8540 /**
8541 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8542 * @ipr_cmd: ipr command struct
8543 *
8544 * Description: This clears PCI reset to the adapter and delays two seconds.
8545 *
8546 * Return value:
8547 * IPR_RC_JOB_RETURN
8548 **/
8549 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8550 {
8551 ENTER;
8552 ipr_cmd->job_step = ipr_reset_bist_done;
8553 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8554 LEAVE;
8555 return IPR_RC_JOB_RETURN;
8556 }
8557
8558 /**
8559 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8560 * @work: work struct
8561 *
8562 * Description: This pulses warm reset to a slot.
8563 *
8564 **/
8565 static void ipr_reset_reset_work(struct work_struct *work)
8566 {
8567 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8568 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8569 struct pci_dev *pdev = ioa_cfg->pdev;
8570 unsigned long lock_flags = 0;
8571
8572 ENTER;
8573 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8574 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8575 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8576
8577 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8578 if (ioa_cfg->reset_cmd == ipr_cmd)
8579 ipr_reset_ioa_job(ipr_cmd);
8580 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8581 LEAVE;
8582 }
8583
8584 /**
8585 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8586 * @ipr_cmd: ipr command struct
8587 *
8588 * Description: This asserts PCI reset to the adapter.
8589 *
8590 * Return value:
8591 * IPR_RC_JOB_RETURN
8592 **/
8593 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8594 {
8595 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8596
8597 ENTER;
8598 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8599 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8600 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8601 LEAVE;
8602 return IPR_RC_JOB_RETURN;
8603 }
8604
8605 /**
8606 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8607 * @ipr_cmd: ipr command struct
8608 *
8609 * Description: This attempts to block config access to the IOA.
8610 *
8611 * Return value:
8612 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8613 **/
8614 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8615 {
8616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8617 int rc = IPR_RC_JOB_CONTINUE;
8618
8619 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8620 ioa_cfg->cfg_locked = 1;
8621 ipr_cmd->job_step = ioa_cfg->reset;
8622 } else {
8623 if (ipr_cmd->u.time_left) {
8624 rc = IPR_RC_JOB_RETURN;
8625 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8626 ipr_reset_start_timer(ipr_cmd,
8627 IPR_CHECK_FOR_RESET_TIMEOUT);
8628 } else {
8629 ipr_cmd->job_step = ioa_cfg->reset;
8630 dev_err(&ioa_cfg->pdev->dev,
8631 "Timed out waiting to lock config access. Resetting anyway.\n");
8632 }
8633 }
8634
8635 return rc;
8636 }
8637
8638 /**
8639 * ipr_reset_block_config_access - Block config access to the IOA
8640 * @ipr_cmd: ipr command struct
8641 *
8642 * Description: This attempts to block config access to the IOA
8643 *
8644 * Return value:
8645 * IPR_RC_JOB_CONTINUE
8646 **/
8647 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8648 {
8649 ipr_cmd->ioa_cfg->cfg_locked = 0;
8650 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8651 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8652 return IPR_RC_JOB_CONTINUE;
8653 }
8654
8655 /**
8656 * ipr_reset_allowed - Query whether or not IOA can be reset
8657 * @ioa_cfg: ioa config struct
8658 *
8659 * Return value:
8660 * 0 if reset not allowed / non-zero if reset is allowed
8661 **/
8662 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8663 {
8664 volatile u32 temp_reg;
8665
8666 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8667 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8668 }
8669
8670 /**
8671 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8672 * @ipr_cmd: ipr command struct
8673 *
8674 * Description: This function waits for adapter permission to run BIST,
8675 * then runs BIST. If the adapter does not give permission after a
8676 * reasonable time, we will reset the adapter anyway. The impact of
8677 * resetting the adapter without warning the adapter is the risk of
8678 * losing the persistent error log on the adapter. If the adapter is
8679 * reset while it is writing to the flash on the adapter, the flash
8680 * segment will have bad ECC and be zeroed.
8681 *
8682 * Return value:
8683 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8684 **/
8685 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8686 {
8687 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8688 int rc = IPR_RC_JOB_RETURN;
8689
8690 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8691 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8692 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8693 } else {
8694 ipr_cmd->job_step = ipr_reset_block_config_access;
8695 rc = IPR_RC_JOB_CONTINUE;
8696 }
8697
8698 return rc;
8699 }
8700
8701 /**
8702 * ipr_reset_alert - Alert the adapter of a pending reset
8703 * @ipr_cmd: ipr command struct
8704 *
8705 * Description: This function alerts the adapter that it will be reset.
8706 * If memory space is not currently enabled, proceed directly
8707 * to running BIST on the adapter. The timer must always be started
8708 * so we guarantee we do not run BIST from ipr_isr.
8709 *
8710 * Return value:
8711 * IPR_RC_JOB_RETURN
8712 **/
8713 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8714 {
8715 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8716 u16 cmd_reg;
8717 int rc;
8718
8719 ENTER;
8720 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8721
8722 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8723 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8724 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8725 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8726 } else {
8727 ipr_cmd->job_step = ipr_reset_block_config_access;
8728 }
8729
8730 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8731 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8732
8733 LEAVE;
8734 return IPR_RC_JOB_RETURN;
8735 }
8736
8737 /**
8738 * ipr_reset_quiesce_done - Complete IOA disconnect
8739 * @ipr_cmd: ipr command struct
8740 *
8741 * Description: Freeze the adapter to complete quiesce processing
8742 *
8743 * Return value:
8744 * IPR_RC_JOB_CONTINUE
8745 **/
8746 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8747 {
8748 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8749
8750 ENTER;
8751 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8752 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8753 LEAVE;
8754 return IPR_RC_JOB_CONTINUE;
8755 }
8756
8757 /**
8758 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8759 * @ipr_cmd: ipr command struct
8760 *
8761 * Description: Ensure nothing is outstanding to the IOA and
8762 * proceed with IOA disconnect. Otherwise reset the IOA.
8763 *
8764 * Return value:
8765 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8766 **/
8767 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8768 {
8769 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8770 struct ipr_cmnd *loop_cmd;
8771 struct ipr_hrr_queue *hrrq;
8772 int rc = IPR_RC_JOB_CONTINUE;
8773 int count = 0;
8774
8775 ENTER;
8776 ipr_cmd->job_step = ipr_reset_quiesce_done;
8777
8778 for_each_hrrq(hrrq, ioa_cfg) {
8779 spin_lock(&hrrq->_lock);
8780 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8781 count++;
8782 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8783 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8784 rc = IPR_RC_JOB_RETURN;
8785 break;
8786 }
8787 spin_unlock(&hrrq->_lock);
8788
8789 if (count)
8790 break;
8791 }
8792
8793 LEAVE;
8794 return rc;
8795 }
8796
8797 /**
8798 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8799 * @ipr_cmd: ipr command struct
8800 *
8801 * Description: Cancel any outstanding HCAMs to the IOA.
8802 *
8803 * Return value:
8804 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8805 **/
8806 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8807 {
8808 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8809 int rc = IPR_RC_JOB_CONTINUE;
8810 struct ipr_cmd_pkt *cmd_pkt;
8811 struct ipr_cmnd *hcam_cmd;
8812 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8813
8814 ENTER;
8815 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8816
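	/* Walk the pending queue looking for an outstanding HCAM; issue a
	 * 64-bit cancel for the first one found and re-enter this step
	 * until none remain, then fall through to the quiesce step.
	 */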
8817 if (!hrrq->ioa_is_dead) {
8818 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8819 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8820 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8821 continue;
8822
8823 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8824 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8825 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8826 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8827 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8828 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
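	/* The 64-bit IOARCB address of the HCAM being cancelled is scattered
	 * across the CDB: bytes 10-13 carry the upper 32 bits and bytes 2-5
	 * the lower 32 bits, most significant byte first.
	 */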
8829 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8830 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8831 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8832 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8833 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8834 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8835 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8836 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8837
8838 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8839 IPR_CANCEL_TIMEOUT);
8840
8841 rc = IPR_RC_JOB_RETURN;
8842 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8843 break;
8844 }
8845 }
8846 } else
8847 ipr_cmd->job_step = ipr_reset_alert;
8848
8849 LEAVE;
8850 return rc;
8851 }
8852
8853 /**
8854 * ipr_reset_ucode_download_done - Microcode download completion
8855 * @ipr_cmd: ipr command struct
8856 *
8857 * Description: This function unmaps the microcode download buffer.
8858 *
8859 * Return value:
8860 * IPR_RC_JOB_CONTINUE
8861 **/
8862 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8863 {
8864 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8865 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8866
8867 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8868 sglist->num_sg, DMA_TO_DEVICE);
8869
8870 ipr_cmd->job_step = ipr_reset_alert;
8871 return IPR_RC_JOB_CONTINUE;
8872 }
8873
8874 /**
8875 * ipr_reset_ucode_download - Download microcode to the adapter
8876 * @ipr_cmd: ipr command struct
8877 *
8878 * Description: This function checks to see if there is microcode
8879 * to download to the adapter. If there is, a download is performed.
8880 *
8881 * Return value:
8882 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8883 **/
8884 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8885 {
8886 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8887 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8888
8889 ENTER;
8890 ipr_cmd->job_step = ipr_reset_alert;
8891
8892 if (!sglist)
8893 return IPR_RC_JOB_CONTINUE;
8894
8895 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8896 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8897 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8898 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8899 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8900 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8901 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8902
8903 if (ioa_cfg->sis64)
8904 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8905 else
8906 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8907 ipr_cmd->job_step = ipr_reset_ucode_download_done;
8908
8909 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8910 IPR_WRITE_BUFFER_TIMEOUT);
8911
8912 LEAVE;
8913 return IPR_RC_JOB_RETURN;
8914 }
8915
8916 /**
8917 * ipr_reset_shutdown_ioa - Shutdown the adapter
8918 * @ipr_cmd: ipr command struct
8919 *
8920 * Description: This function issues an adapter shutdown of the
8921 * specified type to the specified adapter as part of the
8922 * adapter reset job.
8923 *
8924 * Return value:
8925 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8926 **/
8927 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8928 {
8929 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8930 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8931 unsigned long timeout;
8932 int rc = IPR_RC_JOB_CONTINUE;
8933
8934 ENTER;
8935 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8936 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8937 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
8938 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8939 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8940 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8941 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8942 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8943
8944 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8945 timeout = IPR_SHUTDOWN_TIMEOUT;
8946 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8947 timeout = IPR_INTERNAL_TIMEOUT;
8948 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8949 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8950 else
8951 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8952
8953 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8954
8955 rc = IPR_RC_JOB_RETURN;
8956 ipr_cmd->job_step = ipr_reset_ucode_download;
8957 } else
8958 ipr_cmd->job_step = ipr_reset_alert;
8959
8960 LEAVE;
8961 return rc;
8962 }
8963
8964 /**
8965 * ipr_reset_ioa_job - Adapter reset job
8966 * @ipr_cmd: ipr command struct
8967 *
8968 * Description: This function is the job router for the adapter reset job.
8969 *
8970 * Return value:
8971 * none
8972 **/
8973 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8974 {
8975 u32 rc, ioasc;
8976 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8977
8978 do {
8979 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8980
8981 if (ioa_cfg->reset_cmd != ipr_cmd) {
8982 /*
8983 * We are doing nested adapter resets and this is
8984 * not the current reset job.
8985 */
8986 list_add_tail(&ipr_cmd->queue,
8987 &ipr_cmd->hrrq->hrrq_free_q);
8988 return;
8989 }
8990
8991 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8992 rc = ipr_cmd->job_step_failed(ipr_cmd);
8993 if (rc == IPR_RC_JOB_RETURN)
8994 return;
8995 }
8996
8997 ipr_reinit_ipr_cmnd(ipr_cmd);
8998 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8999 rc = ipr_cmd->job_step(ipr_cmd);
9000 } while (rc == IPR_RC_JOB_CONTINUE);
9001 }
9002
9003 /**
9004 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9005 * @ioa_cfg: ioa config struct
9006 * @job_step: first job step of reset job
9007 * @shutdown_type: shutdown type
9008 *
9009 * Description: This function will initiate the reset of the given adapter
9010 * starting at the selected job step.
9011 * If the caller needs to wait on the completion of the reset,
9012 * the caller must sleep on the reset_wait_q.
9013 *
9014 * Return value:
9015 * none
9016 **/
9017 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9018 int (*job_step) (struct ipr_cmnd *),
9019 enum ipr_shutdown_type shutdown_type)
9020 {
9021 struct ipr_cmnd *ipr_cmd;
9022 int i;
9023
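	/* Stop all HRRQs from accepting new commands and block the SCSI
	 * midlayer before handing the first job step to the reset engine.
	 */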
9024 ioa_cfg->in_reset_reload = 1;
9025 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9026 spin_lock(&ioa_cfg->hrrq[i]._lock);
9027 ioa_cfg->hrrq[i].allow_cmds = 0;
9028 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9029 }
9030 wmb();
9031 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
9032 scsi_block_requests(ioa_cfg->host);
9033
9034 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9035 ioa_cfg->reset_cmd = ipr_cmd;
9036 ipr_cmd->job_step = job_step;
9037 ipr_cmd->u.shutdown_type = shutdown_type;
9038
9039 ipr_reset_ioa_job(ipr_cmd);
9040 }
9041
9042 /**
9043 * ipr_initiate_ioa_reset - Initiate an adapter reset
9044 * @ioa_cfg: ioa config struct
9045 * @shutdown_type: shutdown type
9046 *
9047 * Description: This function will initiate the reset of the given adapter.
9048 * If the caller needs to wait on the completion of the reset,
9049 * the caller must sleep on the reset_wait_q.
9050 *
9051 * Return value:
9052 * none
9053 **/
9054 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9055 enum ipr_shutdown_type shutdown_type)
9056 {
9057 int i;
9058
9059 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9060 return;
9061
9062 if (ioa_cfg->in_reset_reload) {
9063 if (ioa_cfg->sdt_state == GET_DUMP)
9064 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9065 else if (ioa_cfg->sdt_state == READ_DUMP)
9066 ioa_cfg->sdt_state = ABORT_DUMP;
9067 }
9068
9069 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9070 dev_err(&ioa_cfg->pdev->dev,
9071 "IOA taken offline - error recovery failed\n");
9072
9073 ioa_cfg->reset_retries = 0;
9074 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9075 spin_lock(&ioa_cfg->hrrq[i]._lock);
9076 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9077 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9078 }
9079 wmb();
9080
9081 if (ioa_cfg->in_ioa_bringdown) {
9082 ioa_cfg->reset_cmd = NULL;
9083 ioa_cfg->in_reset_reload = 0;
9084 ipr_fail_all_ops(ioa_cfg);
9085 wake_up_all(&ioa_cfg->reset_wait_q);
9086
9087 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9088 spin_unlock_irq(ioa_cfg->host->host_lock);
9089 scsi_unblock_requests(ioa_cfg->host);
9090 spin_lock_irq(ioa_cfg->host->host_lock);
9091 }
9092 return;
9093 } else {
9094 ioa_cfg->in_ioa_bringdown = 1;
9095 shutdown_type = IPR_SHUTDOWN_NONE;
9096 }
9097 }
9098
9099 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9100 shutdown_type);
9101 }
9102
9103 /**
9104 * ipr_reset_freeze - Hold off all I/O activity
9105 * @ipr_cmd: ipr command struct
9106 *
9107 * Description: If the PCI slot is frozen, hold off all I/O
9108 * activity; then, as soon as the slot is available again,
9109 * initiate an adapter reset.
9110 */
9111 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9112 {
9113 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9114 int i;
9115
9116 /* Disallow new interrupts, avoid loop */
9117 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9118 spin_lock(&ioa_cfg->hrrq[i]._lock);
9119 ioa_cfg->hrrq[i].allow_interrupts = 0;
9120 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9121 }
9122 wmb();
9123 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9124 ipr_cmd->done = ipr_reset_ioa_job;
9125 return IPR_RC_JOB_RETURN;
9126 }
9127
9128 /**
9129 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9130 * @pdev: PCI device struct
9131 *
9132 * Description: This routine is called to tell us that the MMIO
9133 * access to the IOA has been restored
9134 */
9135 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9136 {
9137 unsigned long flags = 0;
9138 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9139
9140 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9141 if (!ioa_cfg->probe_done)
9142 pci_save_state(pdev);
9143 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9144 return PCI_ERS_RESULT_NEED_RESET;
9145 }
9146
9147 /**
9148 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9149 * @pdev: PCI device struct
9150 *
9151 * Description: This routine is called to tell us that the PCI bus
9152 * is down. Can't do anything here, except put the device driver
9153 * into a holding pattern, waiting for the PCI bus to come back.
9154 */
9155 static void ipr_pci_frozen(struct pci_dev *pdev)
9156 {
9157 unsigned long flags = 0;
9158 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9159
9160 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9161 if (ioa_cfg->probe_done)
9162 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9163 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9164 }
9165
9166 /**
9167 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9168 * @pdev: PCI device struct
9169 *
9170 * Description: This routine is called by the pci error recovery
9171 * code after the PCI slot has been reset, just before we
9172 * should resume normal operations.
9173 */
9174 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9175 {
9176 unsigned long flags = 0;
9177 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9178
9179 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9180 if (ioa_cfg->probe_done) {
9181 if (ioa_cfg->needs_warm_reset)
9182 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9183 else
9184 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9185 IPR_SHUTDOWN_NONE);
9186 } else
9187 wake_up_all(&ioa_cfg->eeh_wait_q);
9188 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9189 return PCI_ERS_RESULT_RECOVERED;
9190 }
9191
9192 /**
9193 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9194 * @pdev: PCI device struct
9195 *
9196 * Description: This routine is called when the PCI bus has
9197 * permanently failed.
9198 */
9199 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9200 {
9201 unsigned long flags = 0;
9202 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9203 int i;
9204
9205 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9206 if (ioa_cfg->probe_done) {
9207 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9208 ioa_cfg->sdt_state = ABORT_DUMP;
9209 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9210 ioa_cfg->in_ioa_bringdown = 1;
9211 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9212 spin_lock(&ioa_cfg->hrrq[i]._lock);
9213 ioa_cfg->hrrq[i].allow_cmds = 0;
9214 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9215 }
9216 wmb();
9217 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9218 } else
9219 wake_up_all(&ioa_cfg->eeh_wait_q);
9220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9221 }
9222
9223 /**
9224 * ipr_pci_error_detected - Called when a PCI error is detected.
9225 * @pdev: PCI device struct
9226 * @state: PCI channel state
9227 *
9228 * Description: Called when a PCI error is detected.
9229 *
9230 * Return value:
9231 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9232 */
9233 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9234 pci_channel_state_t state)
9235 {
9236 switch (state) {
9237 case pci_channel_io_frozen:
9238 ipr_pci_frozen(pdev);
9239 return PCI_ERS_RESULT_CAN_RECOVER;
9240 case pci_channel_io_perm_failure:
9241 ipr_pci_perm_failure(pdev);
9242 return PCI_ERS_RESULT_DISCONNECT;
9243 break;
9244 default:
9245 break;
9246 }
9247 return PCI_ERS_RESULT_NEED_RESET;
9248 }
9249
9250 /**
9251 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9252 * @ioa_cfg: ioa cfg struct
9253 *
9254 * Description: This is the second phase of adapter initialization.
9255 * This function takes care of initializing the adapter to the point
9256 * where it can accept new commands.
9257 *
9258 * Return value:
9259 * 0 on success / -EIO on failure
9260 **/
9261 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9262 {
9263 int rc = 0;
9264 unsigned long host_lock_flags = 0;
9265
9266 ENTER;
9267 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9268 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
9269 ioa_cfg->probe_done = 1;
9270 if (ioa_cfg->needs_hard_reset) {
9271 ioa_cfg->needs_hard_reset = 0;
9272 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9273 } else
9274 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9275 IPR_SHUTDOWN_NONE);
9276 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9277
9278 LEAVE;
9279 return rc;
9280 }
9281
9282 /**
9283 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9284 * @ioa_cfg: ioa config struct
9285 *
9286 * Return value:
9287 * none
9288 **/
9289 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9290 {
9291 int i;
9292
9293 if (ioa_cfg->ipr_cmnd_list) {
9294 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9295 if (ioa_cfg->ipr_cmnd_list[i])
9296 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9297 ioa_cfg->ipr_cmnd_list[i],
9298 ioa_cfg->ipr_cmnd_list_dma[i]);
9299
9300 ioa_cfg->ipr_cmnd_list[i] = NULL;
9301 }
9302 }
9303
9304 if (ioa_cfg->ipr_cmd_pool)
9305 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
9306
9307 kfree(ioa_cfg->ipr_cmnd_list);
9308 kfree(ioa_cfg->ipr_cmnd_list_dma);
9309 ioa_cfg->ipr_cmnd_list = NULL;
9310 ioa_cfg->ipr_cmnd_list_dma = NULL;
9311 ioa_cfg->ipr_cmd_pool = NULL;
9312 }
9313
9314 /**
9315 * ipr_free_mem - Frees memory allocated for an adapter
9316 * @ioa_cfg: ioa cfg struct
9317 *
9318 * Return value:
9319 * nothing
9320 **/
9321 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9322 {
9323 int i;
9324
9325 kfree(ioa_cfg->res_entries);
9326 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9327 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9328 ipr_free_cmd_blks(ioa_cfg);
9329
9330 for (i = 0; i < ioa_cfg->hrrq_num; i++)
9331 dma_free_coherent(&ioa_cfg->pdev->dev,
9332 sizeof(u32) * ioa_cfg->hrrq[i].size,
9333 ioa_cfg->hrrq[i].host_rrq,
9334 ioa_cfg->hrrq[i].host_rrq_dma);
9335
9336 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9337 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9338
9339 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9340 dma_free_coherent(&ioa_cfg->pdev->dev,
9341 sizeof(struct ipr_hostrcb),
9342 ioa_cfg->hostrcb[i],
9343 ioa_cfg->hostrcb_dma[i]);
9344 }
9345
9346 ipr_free_dump(ioa_cfg);
9347 kfree(ioa_cfg->trace);
9348 }
9349
9350 /**
9351 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9352 * @ioa_cfg: ipr cfg struct
9353 *
9354 * This function frees all allocated IRQs for the
9355 * specified adapter.
9356 *
9357 * Return value:
9358 * none
9359 **/
9360 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9361 {
9362 struct pci_dev *pdev = ioa_cfg->pdev;
9363
9364 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9365 ioa_cfg->intr_flag == IPR_USE_MSIX) {
9366 int i;
9367 for (i = 0; i < ioa_cfg->nvectors; i++)
9368 free_irq(ioa_cfg->vectors_info[i].vec,
9369 &ioa_cfg->hrrq[i]);
9370 } else
9371 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9372
9373 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9374 pci_disable_msi(pdev);
9375 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9376 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9377 pci_disable_msix(pdev);
9378 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9379 }
9380 }
9381
9382 /**
9383 * ipr_free_all_resources - Free all allocated resources for an adapter.
9384 * @ioa_cfg: ioa config struct
9385 *
9386 * This function frees all allocated resources for the
9387 * specified adapter.
9388 *
9389 * Return value:
9390 * none
9391 **/
9392 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9393 {
9394 struct pci_dev *pdev = ioa_cfg->pdev;
9395
9396 ENTER;
9397 ipr_free_irqs(ioa_cfg);
9398 if (ioa_cfg->reset_work_q)
9399 destroy_workqueue(ioa_cfg->reset_work_q);
9400 iounmap(ioa_cfg->hdw_dma_regs);
9401 pci_release_regions(pdev);
9402 ipr_free_mem(ioa_cfg);
9403 scsi_host_put(ioa_cfg->host);
9404 pci_disable_device(pdev);
9405 LEAVE;
9406 }
9407
9408 /**
9409 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9410 * @ioa_cfg: ioa config struct
9411 *
9412 * Return value:
9413 * 0 on success / -ENOMEM on allocation failure
9414 **/
9415 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9416 {
9417 struct ipr_cmnd *ipr_cmd;
9418 struct ipr_ioarcb *ioarcb;
9419 dma_addr_t dma_addr;
9420 int i, entries_each_hrrq, hrrq_id = 0;
9421
9422 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9423 sizeof(struct ipr_cmnd), 512, 0);
9424
9425 if (!ioa_cfg->ipr_cmd_pool)
9426 return -ENOMEM;
9427
9428 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9429 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9430
9431 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9432 ipr_free_cmd_blks(ioa_cfg);
9433 return -ENOMEM;
9434 }
9435
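	/* Partition the command blocks across the HRRQs: with multiple
	 * queues, HRRQ 0 is reserved for internal commands and the
	 * remaining base blocks are divided evenly among the other queues.
	 */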
9436 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9437 if (ioa_cfg->hrrq_num > 1) {
9438 if (i == 0) {
9439 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9440 ioa_cfg->hrrq[i].min_cmd_id = 0;
9441 ioa_cfg->hrrq[i].max_cmd_id =
9442 (entries_each_hrrq - 1);
9443 } else {
9444 entries_each_hrrq =
9445 IPR_NUM_BASE_CMD_BLKS/
9446 (ioa_cfg->hrrq_num - 1);
9447 ioa_cfg->hrrq[i].min_cmd_id =
9448 IPR_NUM_INTERNAL_CMD_BLKS +
9449 (i - 1) * entries_each_hrrq;
9450 ioa_cfg->hrrq[i].max_cmd_id =
9451 (IPR_NUM_INTERNAL_CMD_BLKS +
9452 i * entries_each_hrrq - 1);
9453 }
9454 } else {
9455 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9456 ioa_cfg->hrrq[i].min_cmd_id = 0;
9457 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9458 }
9459 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9460 }
9461
9462 BUG_ON(ioa_cfg->hrrq_num == 0);
9463
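	/* Any blocks left over from the integer division above are given
	 * to the last HRRQ so that all IPR_NUM_CMD_BLKS remain usable.
	 */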
9464 i = IPR_NUM_CMD_BLKS -
9465 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9466 if (i > 0) {
9467 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9468 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9469 }
9470
9471 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9472 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9473
9474 if (!ipr_cmd) {
9475 ipr_free_cmd_blks(ioa_cfg);
9476 return -ENOMEM;
9477 }
9478
9479 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9480 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9481 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9482
9483 ioarcb = &ipr_cmd->ioarcb;
9484 ipr_cmd->dma_addr = dma_addr;
9485 if (ioa_cfg->sis64)
9486 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9487 else
9488 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9489
9490 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9491 if (ioa_cfg->sis64) {
9492 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9493 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9494 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9495 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9496 } else {
9497 ioarcb->write_ioadl_addr =
9498 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9499 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9500 ioarcb->ioasa_host_pci_addr =
9501 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9502 }
9503 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9504 ipr_cmd->cmd_index = i;
9505 ipr_cmd->ioa_cfg = ioa_cfg;
9506 ipr_cmd->sense_buffer_dma = dma_addr +
9507 offsetof(struct ipr_cmnd, sense_buffer);
9508
9509 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9510 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9511 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9512 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9513 hrrq_id++;
9514 }
9515
9516 return 0;
9517 }
9518
9519 /**
9520 * ipr_alloc_mem - Allocate memory for an adapter
9521 * @ioa_cfg: ioa config struct
9522 *
9523 * Return value:
9524 * 0 on success / non-zero for error
9525 **/
9526 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9527 {
9528 struct pci_dev *pdev = ioa_cfg->pdev;
9529 int i, rc = -ENOMEM;
9530
9531 ENTER;
9532 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
9533 ioa_cfg->max_devs_supported, GFP_KERNEL);
9534
9535 if (!ioa_cfg->res_entries)
9536 goto out;
9537
9538 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9539 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9540 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9541 }
9542
9543 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9544 sizeof(struct ipr_misc_cbs),
9545 &ioa_cfg->vpd_cbs_dma,
9546 GFP_KERNEL);
9547
9548 if (!ioa_cfg->vpd_cbs)
9549 goto out_free_res_entries;
9550
9551 if (ipr_alloc_cmd_blks(ioa_cfg))
9552 goto out_free_vpd_cbs;
9553
9554 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9555 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9556 sizeof(u32) * ioa_cfg->hrrq[i].size,
9557 &ioa_cfg->hrrq[i].host_rrq_dma,
9558 GFP_KERNEL);
9559
9560 if (!ioa_cfg->hrrq[i].host_rrq) {
9561 while (--i >= 0)
9562 dma_free_coherent(&pdev->dev,
9563 sizeof(u32) * ioa_cfg->hrrq[i].size,
9564 ioa_cfg->hrrq[i].host_rrq,
9565 ioa_cfg->hrrq[i].host_rrq_dma);
9566 goto out_ipr_free_cmd_blocks;
9567 }
9568 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9569 }
9570
9571 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9572 ioa_cfg->cfg_table_size,
9573 &ioa_cfg->cfg_table_dma,
9574 GFP_KERNEL);
9575
9576 if (!ioa_cfg->u.cfg_table)
9577 goto out_free_host_rrq;
9578
9579 for (i = 0; i < IPR_NUM_HCAMS; i++) {
9580 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9581 sizeof(struct ipr_hostrcb),
9582 &ioa_cfg->hostrcb_dma[i],
9583 GFP_KERNEL);
9584
9585 if (!ioa_cfg->hostrcb[i])
9586 goto out_free_hostrcb_dma;
9587
9588 ioa_cfg->hostrcb[i]->hostrcb_dma =
9589 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9590 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9591 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9592 }
9593
9594 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9595 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9596
9597 if (!ioa_cfg->trace)
9598 goto out_free_hostrcb_dma;
9599
9600 rc = 0;
9601 out:
9602 LEAVE;
9603 return rc;
9604
9605 out_free_hostrcb_dma:
9606 while (i-- > 0) {
9607 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9608 ioa_cfg->hostrcb[i],
9609 ioa_cfg->hostrcb_dma[i]);
9610 }
9611 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9612 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9613 out_free_host_rrq:
9614 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9615 dma_free_coherent(&pdev->dev,
9616 sizeof(u32) * ioa_cfg->hrrq[i].size,
9617 ioa_cfg->hrrq[i].host_rrq,
9618 ioa_cfg->hrrq[i].host_rrq_dma);
9619 }
9620 out_ipr_free_cmd_blocks:
9621 ipr_free_cmd_blks(ioa_cfg);
9622 out_free_vpd_cbs:
9623 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9624 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9625 out_free_res_entries:
9626 kfree(ioa_cfg->res_entries);
9627 goto out;
9628 }
9629
9630 /**
9631 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9632 * @ioa_cfg: ioa config struct
9633 *
9634 * Return value:
9635 * none
9636 **/
9637 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9638 {
9639 int i;
9640
9641 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9642 ioa_cfg->bus_attr[i].bus = i;
9643 ioa_cfg->bus_attr[i].qas_enabled = 0;
9644 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9645 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9646 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9647 else
9648 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9649 }
9650 }
9651
9652 /**
9653 * ipr_init_regs - Initialize IOA registers
9654 * @ioa_cfg: ioa config struct
9655 *
9656 * Return value:
9657 * none
9658 **/
9659 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9660 {
9661 const struct ipr_interrupt_offsets *p;
9662 struct ipr_interrupts *t;
9663 void __iomem *base;
9664
9665 p = &ioa_cfg->chip_cfg->regs;
9666 t = &ioa_cfg->regs;
9667 base = ioa_cfg->hdw_dma_regs;
9668
9669 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9670 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9671 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9672 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9673 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9674 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9675 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9676 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9677 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9678 t->ioarrin_reg = base + p->ioarrin_reg;
9679 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9680 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9681 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9682 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9683 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9684 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9685
9686 if (ioa_cfg->sis64) {
9687 t->init_feedback_reg = base + p->init_feedback_reg;
9688 t->dump_addr_reg = base + p->dump_addr_reg;
9689 t->dump_data_reg = base + p->dump_data_reg;
9690 t->endian_swap_reg = base + p->endian_swap_reg;
9691 }
9692 }
9693
9694 /**
9695 * ipr_init_ioa_cfg - Initialize IOA config struct
9696 * @ioa_cfg: ioa config struct
9697 * @host: scsi host struct
9698 * @pdev: PCI dev struct
9699 *
9700 * Return value:
9701 * none
9702 **/
9703 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9704 struct Scsi_Host *host, struct pci_dev *pdev)
9705 {
9706 int i;
9707
9708 ioa_cfg->host = host;
9709 ioa_cfg->pdev = pdev;
9710 ioa_cfg->log_level = ipr_log_level;
9711 ioa_cfg->doorbell = IPR_DOORBELL;
9712 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9713 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9714 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9715 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9716 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9717 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9718
9719 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9720 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9721 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9722 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9723 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9724 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9725 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9726 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9727 ioa_cfg->sdt_state = INACTIVE;
9728
9729 ipr_initialize_bus_attr(ioa_cfg);
9730 ioa_cfg->max_devs_supported = ipr_max_devs;
9731
9732 if (ioa_cfg->sis64) {
9733 host->max_channel = IPR_MAX_SIS64_BUSES;
9734 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9735 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9736 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9737 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9738 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9739 + ((sizeof(struct ipr_config_table_entry64)
9740 * ioa_cfg->max_devs_supported)));
9741 } else {
9742 host->max_channel = IPR_VSET_BUS;
9743 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9744 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9745 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9746 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9747 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9748 + ((sizeof(struct ipr_config_table_entry)
9749 * ioa_cfg->max_devs_supported)));
9750 }
9751
9752 host->unique_id = host->host_no;
9753 host->max_cmd_len = IPR_MAX_CDB_LEN;
9754 host->can_queue = ioa_cfg->max_cmds;
9755 pci_set_drvdata(pdev, ioa_cfg);
9756
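	/* HRRQ 0 shares the SCSI host lock; any additional queues are
	 * protected by their own per-queue locks.
	 */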
9757 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9758 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9759 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9760 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9761 if (i == 0)
9762 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9763 else
9764 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9765 }
9766 }
9767
9768 /**
9769 * ipr_get_chip_info - Find adapter chip information
9770 * @dev_id: PCI device id struct
9771 *
9772 * Return value:
9773 * ptr to chip information on success / NULL on failure
9774 **/
9775 static const struct ipr_chip_t *
9776 ipr_get_chip_info(const struct pci_device_id *dev_id)
9777 {
9778 int i;
9779
9780 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9781 if (ipr_chip[i].vendor == dev_id->vendor &&
9782 ipr_chip[i].device == dev_id->device)
9783 return &ipr_chip[i];
9784 return NULL;
9785 }
9786
9787 /**
9788 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9789 * during probe time
9790 * @ioa_cfg: ioa config struct
9791 *
9792 * Return value:
9793 * None
9794 **/
9795 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9796 {
9797 struct pci_dev *pdev = ioa_cfg->pdev;
9798
9799 if (pci_channel_offline(pdev)) {
9800 wait_event_timeout(ioa_cfg->eeh_wait_q,
9801 !pci_channel_offline(pdev),
9802 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9803 pci_restore_state(pdev);
9804 }
9805 }
9806
9807 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9808 {
9809 struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9810 int i, vectors;
9811
9812 for (i = 0; i < ARRAY_SIZE(entries); ++i)
9813 entries[i].entry = i;
9814
9815 vectors = pci_enable_msix_range(ioa_cfg->pdev,
9816 entries, 1, ipr_number_of_msix);
9817 if (vectors < 0) {
9818 ipr_wait_for_pci_err_recovery(ioa_cfg);
9819 return vectors;
9820 }
9821
9822 for (i = 0; i < vectors; i++)
9823 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9824 ioa_cfg->nvectors = vectors;
9825
9826 return 0;
9827 }
9828
9829 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9830 {
9831 int i, vectors;
9832
9833 vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9834 if (vectors < 0) {
9835 ipr_wait_for_pci_err_recovery(ioa_cfg);
9836 return vectors;
9837 }
9838
9839 for (i = 0; i < vectors; i++)
9840 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9841 ioa_cfg->nvectors = vectors;
9842
9843 return 0;
9844 }
9845
9846 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9847 {
9848 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9849
9850 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9851 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9852 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9853 ioa_cfg->vectors_info[vec_idx].
9854 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9855 }
9856 }
9857
9858 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9859 {
9860 int i, rc;
9861
9862 for (i = 1; i < ioa_cfg->nvectors; i++) {
9863 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9864 ipr_isr_mhrrq,
9865 0,
9866 ioa_cfg->vectors_info[i].desc,
9867 &ioa_cfg->hrrq[i]);
9868 if (rc) {
9869 while (--i >= 0)
9870 free_irq(ioa_cfg->vectors_info[i].vec,
9871 &ioa_cfg->hrrq[i]);
9872 return rc;
9873 }
9874 }
9875 return 0;
9876 }
9877
9878 /**
9879 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9880 * @irq: interrupt number
 * @devp: pointer to the ioa config struct
9881 *
9882 * Description: Simply set the msi_received flag to 1 indicating that
9883 * Message Signaled Interrupts are supported.
9884 *
9885 * Return value:
9886 * IRQ_HANDLED
9887 **/
9888 static irqreturn_t ipr_test_intr(int irq, void *devp)
9889 {
9890 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9891 unsigned long lock_flags = 0;
9892 irqreturn_t rc = IRQ_HANDLED;
9893
9894 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9895 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9896
9897 ioa_cfg->msi_received = 1;
9898 wake_up(&ioa_cfg->msi_wait_q);
9899
9900 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9901 return rc;
9902 }
9903
9904 /**
9905 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9906 * @pdev: PCI device struct
9907 *
9908 * Description: The return value from pci_enable_msi_range() cannot always be
9909 * trusted. This routine sets up and initiates a test interrupt to determine
9910 * if the interrupt is received via the ipr_test_intr() service routine.
9911 * If the test fails, the driver will fall back to LSI.
9912 *
9913 * Return value:
9914 * 0 on success / non-zero on failure
9915 **/
9916 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9917 {
9918 int rc;
9919 volatile u32 int_reg;
9920 unsigned long lock_flags = 0;
9921
9922 ENTER;
9923
9924 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9925 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9926 ioa_cfg->msi_received = 0;
9927 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9928 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9929 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9930 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9931
9932 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9933 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9934 else
9935 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9936 if (rc) {
9937 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9938 return rc;
9939 } else if (ipr_debug)
9940 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9941
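	/* Generate a test interrupt by writing the IO debug acknowledge bit,
	 * then wait up to one second for ipr_test_intr() to set msi_received.
	 */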
9942 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9943 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9944 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9945 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9946 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9947
9948 if (!ioa_cfg->msi_received) {
9949 /* MSI test failed */
9950 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
9951 rc = -EOPNOTSUPP;
9952 } else if (ipr_debug)
9953 dev_info(&pdev->dev, "MSI test succeeded.\n");
9954
9955 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9956
9957 if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9958 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9959 else
9960 free_irq(pdev->irq, ioa_cfg);
9961
9962 LEAVE;
9963
9964 return rc;
9965 }
9966
9967 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9968 * @pdev: PCI device struct
9969 * @dev_id: PCI device id struct
9970 *
9971 * Return value:
9972 * 0 on success / non-zero on failure
9973 **/
9974 static int ipr_probe_ioa(struct pci_dev *pdev,
9975 const struct pci_device_id *dev_id)
9976 {
9977 struct ipr_ioa_cfg *ioa_cfg;
9978 struct Scsi_Host *host;
9979 unsigned long ipr_regs_pci;
9980 void __iomem *ipr_regs;
9981 int rc = PCIBIOS_SUCCESSFUL;
9982 volatile u32 mask, uproc, interrupts;
9983 unsigned long lock_flags, driver_lock_flags;
9984
9985 ENTER;
9986
9987 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9988 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9989
9990 if (!host) {
9991 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9992 rc = -ENOMEM;
9993 goto out;
9994 }
9995
9996 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9997 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9998 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
9999
10000 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
10001
10002 if (!ioa_cfg->ipr_chip) {
10003 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10004 dev_id->vendor, dev_id->device);
10005 goto out_scsi_host_put;
10006 }
10007
10008 /* set SIS 32 or SIS 64 */
10009 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
10010 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
10011 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
10012 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
10013
10014 if (ipr_transop_timeout)
10015 ioa_cfg->transop_timeout = ipr_transop_timeout;
10016 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10017 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10018 else
10019 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10020
10021 ioa_cfg->revid = pdev->revision;
10022
10023 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10024
10025 ipr_regs_pci = pci_resource_start(pdev, 0);
10026
10027 rc = pci_request_regions(pdev, IPR_NAME);
10028 if (rc < 0) {
10029 dev_err(&pdev->dev,
10030 "Couldn't register memory range of registers\n");
10031 goto out_scsi_host_put;
10032 }
10033
10034 rc = pci_enable_device(pdev);
10035
10036 if (rc || pci_channel_offline(pdev)) {
10037 if (pci_channel_offline(pdev)) {
10038 ipr_wait_for_pci_err_recovery(ioa_cfg);
10039 rc = pci_enable_device(pdev);
10040 }
10041
10042 if (rc) {
10043 dev_err(&pdev->dev, "Cannot enable adapter\n");
10044 ipr_wait_for_pci_err_recovery(ioa_cfg);
10045 goto out_release_regions;
10046 }
10047 }
10048
10049 ipr_regs = pci_ioremap_bar(pdev, 0);
10050
10051 if (!ipr_regs) {
10052 dev_err(&pdev->dev,
10053 "Couldn't map memory range of registers\n");
10054 rc = -ENOMEM;
10055 goto out_disable;
10056 }
10057
10058 ioa_cfg->hdw_dma_regs = ipr_regs;
10059 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10060 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10061
10062 ipr_init_regs(ioa_cfg);
10063
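	/* SIS64 adapters prefer a 64-bit DMA mask but can fall back to
	 * 32 bits; legacy adapters are limited to 32-bit DMA.
	 */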
10064 if (ioa_cfg->sis64) {
10065 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10066 if (rc < 0) {
10067 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10068 rc = dma_set_mask_and_coherent(&pdev->dev,
10069 DMA_BIT_MASK(32));
10070 }
10071 } else
10072 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10073
10074 if (rc < 0) {
10075 dev_err(&pdev->dev, "Failed to set DMA mask\n");
10076 goto cleanup_nomem;
10077 }
10078
10079 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10080 ioa_cfg->chip_cfg->cache_line_size);
10081
10082 if (rc != PCIBIOS_SUCCESSFUL) {
10083 dev_err(&pdev->dev, "Write of cache line size failed\n");
10084 ipr_wait_for_pci_err_recovery(ioa_cfg);
10085 rc = -EIO;
10086 goto cleanup_nomem;
10087 }
10088
10089 /* Issue MMIO read to ensure card is not in EEH */
10090 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10091 ipr_wait_for_pci_err_recovery(ioa_cfg);
10092
10093 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10094 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10095 IPR_MAX_MSIX_VECTORS);
10096 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10097 }
10098
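	/* Interrupt setup: try MSI-X first, then MSI, and finally fall
	 * back to legacy (LSI) interrupts with a single vector.
	 */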
10099 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
10100 ipr_enable_msix(ioa_cfg) == 0)
10101 ioa_cfg->intr_flag = IPR_USE_MSIX;
10102 else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
10103 ipr_enable_msi(ioa_cfg) == 0)
10104 ioa_cfg->intr_flag = IPR_USE_MSI;
10105 else {
10106 ioa_cfg->intr_flag = IPR_USE_LSI;
10107 ioa_cfg->clear_isr = 1;
10108 ioa_cfg->nvectors = 1;
10109 dev_info(&pdev->dev, "Cannot enable MSI.\n");
10110 }
10111
10112 pci_set_master(pdev);
10113
10114 if (pci_channel_offline(pdev)) {
10115 ipr_wait_for_pci_err_recovery(ioa_cfg);
10116 pci_set_master(pdev);
10117 if (pci_channel_offline(pdev)) {
10118 rc = -EIO;
10119 goto out_msi_disable;
10120 }
10121 }
10122
10123 if (ioa_cfg->intr_flag == IPR_USE_MSI ||
10124 ioa_cfg->intr_flag == IPR_USE_MSIX) {
10125 rc = ipr_test_msi(ioa_cfg, pdev);
10126 if (rc == -EOPNOTSUPP) {
10127 ipr_wait_for_pci_err_recovery(ioa_cfg);
10128 if (ioa_cfg->intr_flag == IPR_USE_MSI) {
10129 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
10130 pci_disable_msi(pdev);
10131 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
10132 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
10133 pci_disable_msix(pdev);
10134 }
10135
10136 ioa_cfg->intr_flag = IPR_USE_LSI;
10137 ioa_cfg->nvectors = 1;
10138 }
10139 else if (rc)
10140 goto out_msi_disable;
10141 else {
10142 if (ioa_cfg->intr_flag == IPR_USE_MSI)
10143 dev_info(&pdev->dev,
10144 "Request for %d MSIs succeeded with starting IRQ: %d\n",
10145 ioa_cfg->nvectors, pdev->irq);
10146 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10147 dev_info(&pdev->dev,
10148 "Request for %d MSIXs succeeded.",
10149 ioa_cfg->nvectors);
10150 }
10151 }
10152
10153 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10154 (unsigned int)num_online_cpus(),
10155 (unsigned int)IPR_MAX_HRRQ_NUM);
10156
10157 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
10158 goto out_msi_disable;
10159
10160 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
10161 goto out_msi_disable;
10162
10163 rc = ipr_alloc_mem(ioa_cfg);
10164 if (rc < 0) {
10165 dev_err(&pdev->dev,
10166 "Couldn't allocate enough memory for device driver!\n");
10167 goto out_msi_disable;
10168 }
10169
10170 /* Save away PCI config space for use following IOA reset */
10171 rc = pci_save_state(pdev);
10172
10173 if (rc != PCIBIOS_SUCCESSFUL) {
10174 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10175 rc = -EIO;
10176 goto cleanup_nolog;
10177 }
10178
10179 /*
10180 * If HRRQ updated interrupt is not masked, or reset alert is set,
10181 * the card is in an unknown state and needs a hard reset
10182 */
10183 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10184 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10185 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
10186 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10187 ioa_cfg->needs_hard_reset = 1;
10188 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
10189 ioa_cfg->needs_hard_reset = 1;
10190 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10191 ioa_cfg->ioa_unit_checked = 1;
10192
10193 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10194 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10195 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10196
10197 if (ioa_cfg->intr_flag == IPR_USE_MSI
10198 || ioa_cfg->intr_flag == IPR_USE_MSIX) {
10199 name_msi_vectors(ioa_cfg);
10200 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
10201 0,
10202 ioa_cfg->vectors_info[0].desc,
10203 &ioa_cfg->hrrq[0]);
10204 if (!rc)
10205 rc = ipr_request_other_msi_irqs(ioa_cfg);
10206 } else {
10207 rc = request_irq(pdev->irq, ipr_isr,
10208 IRQF_SHARED,
10209 IPR_NAME, &ioa_cfg->hrrq[0]);
10210 }
10211 if (rc) {
10212 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10213 pdev->irq, rc);
10214 goto cleanup_nolog;
10215 }
10216
10217 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10218 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10219 ioa_cfg->needs_warm_reset = 1;
10220 ioa_cfg->reset = ipr_reset_slot_reset;
10221
10222 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10223 WQ_MEM_RECLAIM, host->host_no);
10224
10225 if (!ioa_cfg->reset_work_q) {
10226 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
10227 goto out_free_irq;
10228 }
10229 } else
10230 ioa_cfg->reset = ipr_reset_start_bist;
10231
10232 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10233 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
10234 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10235
10236 LEAVE;
10237 out:
10238 return rc;
10239
10240 out_free_irq:
10241 ipr_free_irqs(ioa_cfg);
10242 cleanup_nolog:
10243 ipr_free_mem(ioa_cfg);
10244 out_msi_disable:
10245 ipr_wait_for_pci_err_recovery(ioa_cfg);
10246 if (ioa_cfg->intr_flag == IPR_USE_MSI)
10247 pci_disable_msi(pdev);
10248 else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
10249 pci_disable_msix(pdev);
10250 cleanup_nomem:
10251 iounmap(ipr_regs);
10252 out_disable:
10253 pci_disable_device(pdev);
10254 out_release_regions:
10255 pci_release_regions(pdev);
10256 out_scsi_host_put:
10257 scsi_host_put(host);
10258 goto out;
10259 }
10260
10261 /**
10262 * ipr_initiate_ioa_bringdown - Bring down an adapter
10263 * @ioa_cfg: ioa config struct
10264 * @shutdown_type: shutdown type
10265 *
10266 * Description: This function will initiate bringing down the adapter.
10267 * This consists of issuing an IOA shutdown to the adapter
10268 * to flush the cache, and running BIST.
10269 * If the caller needs to wait on the completion of the reset,
10270 * the caller must sleep on the reset_wait_q.
10271 *
10272 * Return value:
10273 * none
10274 **/
10275 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10276 enum ipr_shutdown_type shutdown_type)
10277 {
10278 ENTER;
10279 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10280 ioa_cfg->sdt_state = ABORT_DUMP;
10281 ioa_cfg->reset_retries = 0;
10282 ioa_cfg->in_ioa_bringdown = 1;
10283 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10284 LEAVE;
10285 }
10286
10287 /**
10288 * __ipr_remove - Remove a single adapter
10289 * @pdev: pci device struct
10290 *
10291 * Adapter hot plug remove entry point.
10292 *
10293 * Return value:
10294 * none
10295 **/
10296 static void __ipr_remove(struct pci_dev *pdev)
10297 {
10298 unsigned long host_lock_flags = 0;
10299 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10300 int i;
10301 unsigned long driver_lock_flags;
10302 ENTER;
10303
10304 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10305 while (ioa_cfg->in_reset_reload) {
10306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10307 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10308 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10309 }
10310
10311 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10312 spin_lock(&ioa_cfg->hrrq[i]._lock);
10313 ioa_cfg->hrrq[i].removing_ioa = 1;
10314 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10315 }
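	/*
	 * Ensure the removing_ioa flags are visible to the interrupt and
	 * reset paths before the bringdown below is initiated.
	 */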
10316 wmb();
10317 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10318
10319 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10320 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10321 flush_work(&ioa_cfg->work_q);
10322 if (ioa_cfg->reset_work_q)
10323 flush_workqueue(ioa_cfg->reset_work_q);
10324 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
10325 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10326
10327 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10328 list_del(&ioa_cfg->queue);
10329 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10330
10331 if (ioa_cfg->sdt_state == ABORT_DUMP)
10332 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10333 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10334
10335 ipr_free_all_resources(ioa_cfg);
10336
10337 LEAVE;
10338 }
10339
10340 /**
10341 * ipr_remove - IOA hot plug remove entry point
10342 * @pdev: pci device struct
10343 *
10344 * Adapter hot plug remove entry point.
10345 *
10346 * Return value:
10347 * none
10348 **/
10349 static void ipr_remove(struct pci_dev *pdev)
10350 {
10351 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10352
10353 ENTER;
10354
10355 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10356 &ipr_trace_attr);
10357 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10358 &ipr_dump_attr);
10359 scsi_remove_host(ioa_cfg->host);
10360
10361 __ipr_remove(pdev);
10362
10363 LEAVE;
10364 }
10365
10366 /**
10367 * ipr_probe - Adapter hot plug add entry point
 * @pdev: pci device struct
 * @dev_id: pci device id struct
10368 *
10369 * Return value:
10370 * 0 on success / non-zero on failure
10371 **/
10372 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10373 {
10374 struct ipr_ioa_cfg *ioa_cfg;
10375 int rc, i;
10376
10377 rc = ipr_probe_ioa(pdev, dev_id);
10378
10379 if (rc)
10380 return rc;
10381
10382 ioa_cfg = pci_get_drvdata(pdev);
10383 rc = ipr_probe_ioa_part2(ioa_cfg);
10384
10385 if (rc) {
10386 __ipr_remove(pdev);
10387 return rc;
10388 }
10389
10390 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10391
10392 if (rc) {
10393 __ipr_remove(pdev);
10394 return rc;
10395 }
10396
10397 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
10398 &ipr_trace_attr);
10399
10400 if (rc) {
10401 scsi_remove_host(ioa_cfg->host);
10402 __ipr_remove(pdev);
10403 return rc;
10404 }
10405
10406 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10407 &ipr_dump_attr);
10408
10409 if (rc) {
10410 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10411 &ipr_trace_attr);
10412 scsi_remove_host(ioa_cfg->host);
10413 __ipr_remove(pdev);
10414 return rc;
10415 }
10416
10417 scsi_scan_host(ioa_cfg->host);
10418 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10419
10420 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10421 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
10422 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
10423 ioa_cfg->iopoll_weight, ipr_iopoll);
10424 blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
10425 }
10426 }
10427
10428 schedule_work(&ioa_cfg->work_q);
10429 return 0;
10430 }
10431
10432 /**
10433 * ipr_shutdown - Shutdown handler.
10434 * @pdev: pci device struct
10435 *
10436 * This function is invoked upon system shutdown/reboot. It will issue
10437 * an adapter shutdown to the adapter to flush the write cache.
10438 *
10439 * Return value:
10440 * none
10441 **/
10442 static void ipr_shutdown(struct pci_dev *pdev)
10443 {
10444 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10445 unsigned long lock_flags = 0;
10446 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
10447 int i;
10448
10449 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10450 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
10451 ioa_cfg->iopoll_weight = 0;
10452 for (i = 1; i < ioa_cfg->hrrq_num; i++)
10453 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
10454 }
10455
10456 while (ioa_cfg->in_reset_reload) {
10457 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10458 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10459 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10460 }
10461
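	/*
	 * With ipr_fast_reboot set, SIS-64 adapters take the quiesce shutdown
	 * path on restart, and their IRQs and PCI device are released below
	 * to shorten the reboot.
	 */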
10462 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10463 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10464
10465 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
10466 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10467 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10468 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
10469 ipr_free_irqs(ioa_cfg);
10470 pci_disable_device(ioa_cfg->pdev);
10471 }
10472 }
10473
10474 static struct pci_device_id ipr_pci_table[] = {
10475 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10476 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
10477 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10478 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
10479 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10480 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
10481 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
10482 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
10483 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10484 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
10485 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10486 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
10487 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10488 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
10489 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
10490 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10491 IPR_USE_LONG_TRANSOP_TIMEOUT },
10492 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10493 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10494 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10495 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10496 IPR_USE_LONG_TRANSOP_TIMEOUT },
10497 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
10498 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10499 IPR_USE_LONG_TRANSOP_TIMEOUT },
10500 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10501 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
10502 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10503 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10504 IPR_USE_LONG_TRANSOP_TIMEOUT},
10505 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
10506 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10507 IPR_USE_LONG_TRANSOP_TIMEOUT },
10508 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10509 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10510 IPR_USE_LONG_TRANSOP_TIMEOUT },
10511 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10512 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10513 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10514 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10515 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10516 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
10517 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
10518 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
10519 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
10520 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10521 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
10522 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10523 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10524 IPR_USE_LONG_TRANSOP_TIMEOUT },
10525 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
10526 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10527 IPR_USE_LONG_TRANSOP_TIMEOUT },
10528 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10529 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10530 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10531 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10532 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10533 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
10534 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10535 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10536 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10537 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10538 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10539 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
10540 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10541 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
10542 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10543 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
10544 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10545 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
10546 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10547 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10548 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10549 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
10550 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10551 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10552 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10553 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10554 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10555 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10556 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10557 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
10558 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10559 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10560 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10561 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10562 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10563 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10564 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10565 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10566 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10567 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10568 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10569 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10570 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10571 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10572 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10573 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
10574 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10575 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10576 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10577 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10578 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10579 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10580 { }
10581 };
10582 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10583
10584 static const struct pci_error_handlers ipr_err_handler = {
10585 .error_detected = ipr_pci_error_detected,
10586 .mmio_enabled = ipr_pci_mmio_enabled,
10587 .slot_reset = ipr_pci_slot_reset,
10588 };
10589
10590 static struct pci_driver ipr_driver = {
10591 .name = IPR_NAME,
10592 .id_table = ipr_pci_table,
10593 .probe = ipr_probe,
10594 .remove = ipr_remove,
10595 .shutdown = ipr_shutdown,
10596 .err_handler = &ipr_err_handler,
10597 };
10598
10599 /**
10600 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd: ipr command struct
10601 *
10602 * Return value:
10603 * none
10604 **/
10605 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10606 {
10607 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10608 }
10609
10610 /**
10611 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb: Notifier block
 * @event: Notifier event
 * @buf: Notifier data (unused)
10612 *
10613 * Return value:
10614 * NOTIFY_OK on success / NOTIFY_DONE on failure
10615 **/
10616 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10617 {
10618 struct ipr_cmnd *ipr_cmd;
10619 struct ipr_ioa_cfg *ioa_cfg;
10620 unsigned long flags = 0, driver_lock_flags;
10621
10622 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10623 return NOTIFY_DONE;
10624
10625 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10626
10627 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10628 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
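		/*
		 * Skip adapters that cannot accept commands, as well as
		 * SIS-64 adapters during a fast reboot restart, which are
		 * quiesced from ipr_shutdown() instead.
		 */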
10629 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10630 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10631 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10632 continue;
10633 }
10634
10635 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10636 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10637 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10638 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10639 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10640
10641 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10642 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10643 }
10644 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10645
10646 return NOTIFY_OK;
10647 }
10648
10649 static struct notifier_block ipr_notifier = {
10650 	.notifier_call = ipr_halt,
10651 };
10652
10653 /**
10654 * ipr_init - Module entry point
10655 *
10656 * Return value:
10657 * 0 on success / negative value on failure
10658 **/
10659 static int __init ipr_init(void)
10660 {
10661 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10662 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10663
10664 register_reboot_notifier(&ipr_notifier);
10665 return pci_register_driver(&ipr_driver);
10666 }
10667
10668 /**
10669 * ipr_exit - Module unload
10670 *
10671 * Module unload entry point.
10672 *
10673 * Return value:
10674 * none
10675 **/
10676 static void __exit ipr_exit(void)
10677 {
10678 unregister_reboot_notifier(&ipr_notifier);
10679 pci_unregister_driver(&ipr_driver);
10680 }
10681
10682 module_init(ipr_init);
10683 module_exit(ipr_exit);
10684