1 /*
2  *    Disk Array driver for HP Smart Array SAS controllers
3  *    Copyright 2014-2015 PMC-Sierra, Inc.
4  *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
5  *
6  *    This program is free software; you can redistribute it and/or modify
7  *    it under the terms of the GNU General Public License as published by
8  *    the Free Software Foundation; version 2 of the License.
9  *
10  *    This program is distributed in the hope that it will be useful,
11  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
14  *
15  *    Questions/Comments/Bugfixes to storagedev@pmcs.com
16  *
17  */
18 
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/pci-aspm.h>
24 #include <linux/kernel.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/fs.h>
28 #include <linux/timer.h>
29 #include <linux/init.h>
30 #include <linux/spinlock.h>
31 #include <linux/compat.h>
32 #include <linux/blktrace_api.h>
33 #include <linux/uaccess.h>
34 #include <linux/io.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/completion.h>
37 #include <linux/moduleparam.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_device.h>
41 #include <scsi/scsi_host.h>
42 #include <scsi/scsi_tcq.h>
43 #include <scsi/scsi_eh.h>
44 #include <scsi/scsi_transport_sas.h>
45 #include <scsi/scsi_dbg.h>
46 #include <linux/cciss_ioctl.h>
47 #include <linux/string.h>
48 #include <linux/bitmap.h>
49 #include <linux/atomic.h>
50 #include <linux/jiffies.h>
51 #include <linux/percpu-defs.h>
52 #include <linux/percpu.h>
53 #include <asm/unaligned.h>
54 #include <asm/div64.h>
55 #include "hpsa_cmd.h"
56 #include "hpsa.h"
57 
58 /*
59  * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
60  * with an optional trailing '-' followed by a byte value (0-255).
61  */
62 #define HPSA_DRIVER_VERSION "3.4.14-0"
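/* e.g. "3.4.14-0" encodes the three byte values 3, 4 and 14 plus the optional trailing byte 0 */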
63 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
64 #define HPSA "hpsa"
65 
66 /* How long to wait for CISS doorbell communication */
67 #define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
68 #define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
69 #define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
70 #define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
71 #define MAX_IOCTL_CONFIG_WAIT 1000
72 
73 /* define how many times we will try a command because of bus resets */
74 #define MAX_CMD_RETRIES 3
75 
76 /* Embedded module documentation macros - see modules.h */
77 MODULE_AUTHOR("Hewlett-Packard Company");
78 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
79 	HPSA_DRIVER_VERSION);
80 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
81 MODULE_VERSION(HPSA_DRIVER_VERSION);
82 MODULE_LICENSE("GPL");
83 
84 static int hpsa_allow_any;
85 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
86 MODULE_PARM_DESC(hpsa_allow_any,
87 		"Allow hpsa driver to access unknown HP Smart Array hardware");
88 static int hpsa_simple_mode;
89 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
90 MODULE_PARM_DESC(hpsa_simple_mode,
91 	"Use 'simple mode' rather than 'performant mode'");
92 
93 /* define the PCI info for the cards we can control */
94 static const struct pci_device_id hpsa_pci_device_id[] = {
95 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
96 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
97 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
98 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
99 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
100 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
101 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
102 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
103 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
104 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
105 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
106 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
107 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
108 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
109 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
110 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
111 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
112 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
113 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
114 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
115 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
116 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
117 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
118 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
119 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
120 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
121 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
122 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
123 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
124 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
125 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
126 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
127 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
128 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
129 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
130 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
131 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
132 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
133 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
134 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
135 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
136 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
137 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
138 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
139 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
140 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
141 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
142 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
143 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
144 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
145 	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
146 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
147 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
148 	{0,}
149 };
150 
151 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
152 
153 /*  board_id = Subsystem Device ID concatenated with Subsystem Vendor ID
154  *  product = Marketing Name for the board
155  *  access = Address of the struct of function pointers
156  */
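/*
 * For example, the P212 entry below combines PCI subsystem device ID 0x3241
 * with subsystem vendor ID 0x103C (from the hpsa_pci_device_id table above)
 * into board_id 0x3241103C.
 */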
157 static struct board_type products[] = {
158 	{0x3241103C, "Smart Array P212", &SA5_access},
159 	{0x3243103C, "Smart Array P410", &SA5_access},
160 	{0x3245103C, "Smart Array P410i", &SA5_access},
161 	{0x3247103C, "Smart Array P411", &SA5_access},
162 	{0x3249103C, "Smart Array P812", &SA5_access},
163 	{0x324A103C, "Smart Array P712m", &SA5_access},
164 	{0x324B103C, "Smart Array P711m", &SA5_access},
165 	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
166 	{0x3350103C, "Smart Array P222", &SA5_access},
167 	{0x3351103C, "Smart Array P420", &SA5_access},
168 	{0x3352103C, "Smart Array P421", &SA5_access},
169 	{0x3353103C, "Smart Array P822", &SA5_access},
170 	{0x3354103C, "Smart Array P420i", &SA5_access},
171 	{0x3355103C, "Smart Array P220i", &SA5_access},
172 	{0x3356103C, "Smart Array P721m", &SA5_access},
173 	{0x1921103C, "Smart Array P830i", &SA5_access},
174 	{0x1922103C, "Smart Array P430", &SA5_access},
175 	{0x1923103C, "Smart Array P431", &SA5_access},
176 	{0x1924103C, "Smart Array P830", &SA5_access},
177 	{0x1926103C, "Smart Array P731m", &SA5_access},
178 	{0x1928103C, "Smart Array P230i", &SA5_access},
179 	{0x1929103C, "Smart Array P530", &SA5_access},
180 	{0x21BD103C, "Smart Array P244br", &SA5_access},
181 	{0x21BE103C, "Smart Array P741m", &SA5_access},
182 	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
183 	{0x21C0103C, "Smart Array P440ar", &SA5_access},
184 	{0x21C1103C, "Smart Array P840ar", &SA5_access},
185 	{0x21C2103C, "Smart Array P440", &SA5_access},
186 	{0x21C3103C, "Smart Array P441", &SA5_access},
187 	{0x21C4103C, "Smart Array", &SA5_access},
188 	{0x21C5103C, "Smart Array P841", &SA5_access},
189 	{0x21C6103C, "Smart HBA H244br", &SA5_access},
190 	{0x21C7103C, "Smart HBA H240", &SA5_access},
191 	{0x21C8103C, "Smart HBA H241", &SA5_access},
192 	{0x21C9103C, "Smart Array", &SA5_access},
193 	{0x21CA103C, "Smart Array P246br", &SA5_access},
194 	{0x21CB103C, "Smart Array P840", &SA5_access},
195 	{0x21CC103C, "Smart Array", &SA5_access},
196 	{0x21CD103C, "Smart Array", &SA5_access},
197 	{0x21CE103C, "Smart HBA", &SA5_access},
198 	{0x05809005, "SmartHBA-SA", &SA5_access},
199 	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
200 	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
201 	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
202 	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
203 	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
204 	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
205 	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
206 	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
207 	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
208 	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
209 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
210 };
211 
212 static struct scsi_transport_template *hpsa_sas_transport_template;
213 static int hpsa_add_sas_host(struct ctlr_info *h);
214 static void hpsa_delete_sas_host(struct ctlr_info *h);
215 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
216 			struct hpsa_scsi_dev_t *device);
217 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
218 static struct hpsa_scsi_dev_t
219 	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
220 		struct sas_rphy *rphy);
221 
222 #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
223 static const struct scsi_cmnd hpsa_cmd_busy;
224 #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
225 static const struct scsi_cmnd hpsa_cmd_idle;
226 static int number_of_controllers;
227 
228 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
229 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
230 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
231 
232 #ifdef CONFIG_COMPAT
233 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
234 	void __user *arg);
235 #endif
236 
237 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
238 static struct CommandList *cmd_alloc(struct ctlr_info *h);
239 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
240 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
241 					    struct scsi_cmnd *scmd);
242 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
243 	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
244 	int cmd_type);
245 static void hpsa_free_cmd_pool(struct ctlr_info *h);
246 #define VPD_PAGE (1 << 8)
247 #define HPSA_SIMPLE_ERROR_BITS 0x03
248 
249 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
250 static void hpsa_scan_start(struct Scsi_Host *);
251 static int hpsa_scan_finished(struct Scsi_Host *sh,
252 	unsigned long elapsed_time);
253 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
254 
255 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
256 static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
257 static int hpsa_slave_alloc(struct scsi_device *sdev);
258 static int hpsa_slave_configure(struct scsi_device *sdev);
259 static void hpsa_slave_destroy(struct scsi_device *sdev);
260 
261 static void hpsa_update_scsi_devices(struct ctlr_info *h);
262 static int check_for_unit_attention(struct ctlr_info *h,
263 	struct CommandList *c);
264 static void check_ioctl_unit_attention(struct ctlr_info *h,
265 	struct CommandList *c);
266 /* performant mode helper functions */
267 static void calc_bucket_map(int *bucket, int num_buckets,
268 	int nsgs, int min_blocks, u32 *bucket_map);
269 static void hpsa_free_performant_mode(struct ctlr_info *h);
270 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
271 static inline u32 next_command(struct ctlr_info *h, u8 q);
272 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
273 			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
274 			       u64 *cfg_offset);
275 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
276 				    unsigned long *memory_bar);
277 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
278 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
279 				     int wait_for_ready);
280 static inline void finish_cmd(struct CommandList *c);
281 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
282 #define BOARD_NOT_READY 0
283 #define BOARD_READY 1
284 static void hpsa_drain_accel_commands(struct ctlr_info *h);
285 static void hpsa_flush_cache(struct ctlr_info *h);
286 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
287 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
288 	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
289 static void hpsa_command_resubmit_worker(struct work_struct *work);
290 static u32 lockup_detected(struct ctlr_info *h);
291 static int detect_controller_lockup(struct ctlr_info *h);
292 static void hpsa_disable_rld_caching(struct ctlr_info *h);
293 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
294 	struct ReportExtendedLUNdata *buf, int bufsize);
295 static int hpsa_luns_changed(struct ctlr_info *h);
296 
297 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
298 {
299 	unsigned long *priv = shost_priv(sdev->host);
300 	return (struct ctlr_info *) *priv;
301 }
302 
303 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
304 {
305 	unsigned long *priv = shost_priv(sh);
306 	return (struct ctlr_info *) *priv;
307 }
308 
309 static inline bool hpsa_is_cmd_idle(struct CommandList *c)
310 {
311 	return c->scsi_cmd == SCSI_CMD_IDLE;
312 }
313 
314 static inline bool hpsa_is_pending_event(struct CommandList *c)
315 {
316 	return c->abort_pending || c->reset_pending;
317 }
318 
319 /* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
320 static void decode_sense_data(const u8 *sense_data, int sense_data_len,
321 			u8 *sense_key, u8 *asc, u8 *ascq)
322 {
323 	struct scsi_sense_hdr sshdr;
324 	bool rc;
325 
326 	*sense_key = -1;
327 	*asc = -1;
328 	*ascq = -1;
329 
330 	if (sense_data_len < 1)
331 		return;
332 
333 	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
334 	if (rc) {
335 		*sense_key = sshdr.sense_key;
336 		*asc = sshdr.asc;
337 		*ascq = sshdr.ascq;
338 	}
339 }
340 
341 static int check_for_unit_attention(struct ctlr_info *h,
342 	struct CommandList *c)
343 {
344 	u8 sense_key, asc, ascq;
345 	int sense_len;
346 
347 	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
348 		sense_len = sizeof(c->err_info->SenseInfo);
349 	else
350 		sense_len = c->err_info->SenseLen;
351 
352 	decode_sense_data(c->err_info->SenseInfo, sense_len,
353 				&sense_key, &asc, &ascq);
354 	if (sense_key != UNIT_ATTENTION || asc == 0xff)
355 		return 0;
356 
357 	switch (asc) {
358 	case STATE_CHANGED:
359 		dev_warn(&h->pdev->dev,
360 			"%s: a state change detected, command retried\n",
361 			h->devname);
362 		break;
363 	case LUN_FAILED:
364 		dev_warn(&h->pdev->dev,
365 			"%s: LUN failure detected\n", h->devname);
366 		break;
367 	case REPORT_LUNS_CHANGED:
368 		dev_warn(&h->pdev->dev,
369 			"%s: report LUN data changed\n", h->devname);
370 	/*
371 	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
372 	 * target (array) devices.
373 	 */
374 		break;
375 	case POWER_OR_RESET:
376 		dev_warn(&h->pdev->dev,
377 			"%s: a power on or device reset detected\n",
378 			h->devname);
379 		break;
380 	case UNIT_ATTENTION_CLEARED:
381 		dev_warn(&h->pdev->dev,
382 			"%s: unit attention cleared by another initiator\n",
383 			h->devname);
384 		break;
385 	default:
386 		dev_warn(&h->pdev->dev,
387 			"%s: unknown unit attention detected\n",
388 			h->devname);
389 		break;
390 	}
391 	return 1;
392 }
393 
394 static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
395 {
396 	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
397 		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
398 		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
399 		return 0;
400 	dev_warn(&h->pdev->dev, HPSA "device busy");
401 	return 1;
402 }
403 
404 static u32 lockup_detected(struct ctlr_info *h);
405 static ssize_t host_show_lockup_detected(struct device *dev,
406 		struct device_attribute *attr, char *buf)
407 {
408 	int ld;
409 	struct ctlr_info *h;
410 	struct Scsi_Host *shost = class_to_shost(dev);
411 
412 	h = shost_to_hba(shost);
413 	ld = lockup_detected(h);
414 
415 	return sprintf(buf, "ld=%d\n", ld);
416 }
417 
418 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
419 					 struct device_attribute *attr,
420 					 const char *buf, size_t count)
421 {
422 	int status, len;
423 	struct ctlr_info *h;
424 	struct Scsi_Host *shost = class_to_shost(dev);
425 	char tmpbuf[10];
426 
427 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
428 		return -EACCES;
429 	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
430 	strncpy(tmpbuf, buf, len);
431 	tmpbuf[len] = '\0';
432 	if (sscanf(tmpbuf, "%d", &status) != 1)
433 		return -EINVAL;
434 	h = shost_to_hba(shost);
435 	h->acciopath_status = !!status;
436 	dev_warn(&h->pdev->dev,
437 		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
438 		h->acciopath_status ? "enabled" : "disabled");
439 	return count;
440 }
441 
442 static ssize_t host_store_raid_offload_debug(struct device *dev,
443 					 struct device_attribute *attr,
444 					 const char *buf, size_t count)
445 {
446 	int debug_level, len;
447 	struct ctlr_info *h;
448 	struct Scsi_Host *shost = class_to_shost(dev);
449 	char tmpbuf[10];
450 
451 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
452 		return -EACCES;
453 	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
454 	strncpy(tmpbuf, buf, len);
455 	tmpbuf[len] = '\0';
456 	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
457 		return -EINVAL;
458 	if (debug_level < 0)
459 		debug_level = 0;
460 	h = shost_to_hba(shost);
461 	h->raid_offload_debug = debug_level;
462 	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
463 		h->raid_offload_debug);
464 	return count;
465 }
466 
467 static ssize_t host_store_rescan(struct device *dev,
468 				 struct device_attribute *attr,
469 				 const char *buf, size_t count)
470 {
471 	struct ctlr_info *h;
472 	struct Scsi_Host *shost = class_to_shost(dev);
473 	h = shost_to_hba(shost);
474 	hpsa_scan_start(h->scsi_host);
475 	return count;
476 }
477 
478 static ssize_t host_show_firmware_revision(struct device *dev,
479 	     struct device_attribute *attr, char *buf)
480 {
481 	struct ctlr_info *h;
482 	struct Scsi_Host *shost = class_to_shost(dev);
483 	unsigned char *fwrev;
484 
485 	h = shost_to_hba(shost);
486 	if (!h->hba_inquiry_data)
487 		return 0;
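	/* bytes 32-35 of the standard INQUIRY response carry the product revision level */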
488 	fwrev = &h->hba_inquiry_data[32];
489 	return snprintf(buf, 20, "%c%c%c%c\n",
490 		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
491 }
492 
493 static ssize_t host_show_commands_outstanding(struct device *dev,
494 	     struct device_attribute *attr, char *buf)
495 {
496 	struct Scsi_Host *shost = class_to_shost(dev);
497 	struct ctlr_info *h = shost_to_hba(shost);
498 
499 	return snprintf(buf, 20, "%d\n",
500 			atomic_read(&h->commands_outstanding));
501 }
502 
503 static ssize_t host_show_transport_mode(struct device *dev,
504 	struct device_attribute *attr, char *buf)
505 {
506 	struct ctlr_info *h;
507 	struct Scsi_Host *shost = class_to_shost(dev);
508 
509 	h = shost_to_hba(shost);
510 	return snprintf(buf, 20, "%s\n",
511 		h->transMethod & CFGTBL_Trans_Performant ?
512 			"performant" : "simple");
513 }
514 
515 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
516 	struct device_attribute *attr, char *buf)
517 {
518 	struct ctlr_info *h;
519 	struct Scsi_Host *shost = class_to_shost(dev);
520 
521 	h = shost_to_hba(shost);
522 	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
523 		(h->acciopath_status == 1) ?  "enabled" : "disabled");
524 }
525 
526 /* List of controllers which cannot be hard reset on kexec with reset_devices */
527 static u32 unresettable_controller[] = {
528 	0x324a103C, /* Smart Array P712m */
529 	0x324b103C, /* Smart Array P711m */
530 	0x3223103C, /* Smart Array P800 */
531 	0x3234103C, /* Smart Array P400 */
532 	0x3235103C, /* Smart Array P400i */
533 	0x3211103C, /* Smart Array E200i */
534 	0x3212103C, /* Smart Array E200 */
535 	0x3213103C, /* Smart Array E200i */
536 	0x3214103C, /* Smart Array E200i */
537 	0x3215103C, /* Smart Array E200i */
538 	0x3237103C, /* Smart Array E500 */
539 	0x323D103C, /* Smart Array P700m */
540 	0x40800E11, /* Smart Array 5i */
541 	0x409C0E11, /* Smart Array 6400 */
542 	0x409D0E11, /* Smart Array 6400 EM */
543 	0x40700E11, /* Smart Array 5300 */
544 	0x40820E11, /* Smart Array 532 */
545 	0x40830E11, /* Smart Array 5312 */
546 	0x409A0E11, /* Smart Array 641 */
547 	0x409B0E11, /* Smart Array 642 */
548 	0x40910E11, /* Smart Array 6i */
549 };
550 
551 /* List of controllers which cannot even be soft reset */
552 static u32 soft_unresettable_controller[] = {
553 	0x40800E11, /* Smart Array 5i */
554 	0x40700E11, /* Smart Array 5300 */
555 	0x40820E11, /* Smart Array 532 */
556 	0x40830E11, /* Smart Array 5312 */
557 	0x409A0E11, /* Smart Array 641 */
558 	0x409B0E11, /* Smart Array 642 */
559 	0x40910E11, /* Smart Array 6i */
560 	/* Exclude 640x boards.  These are two pci devices in one slot
561 	 * which share a battery backed cache module.  One controls the
562 	 * cache, the other accesses the cache through the one that controls
563 	 * it.  If we reset the one controlling the cache, the other will
564 	 * likely not be happy.  Just forbid resetting this conjoined mess.
565 	 * The 640x isn't really supported by hpsa anyway.
566 	 */
567 	0x409C0E11, /* Smart Array 6400 */
568 	0x409D0E11, /* Smart Array 6400 EM */
569 };
570 
571 static u32 needs_abort_tags_swizzled[] = {
572 	0x323D103C, /* Smart Array P700m */
573 	0x324a103C, /* Smart Array P712m */
574 	0x324b103C, /* Smart Array P711m */
575 };
576 
577 static int board_id_in_array(u32 a[], int nelems, u32 board_id)
578 {
579 	int i;
580 
581 	for (i = 0; i < nelems; i++)
582 		if (a[i] == board_id)
583 			return 1;
584 	return 0;
585 }
586 
587 static int ctlr_is_hard_resettable(u32 board_id)
588 {
589 	return !board_id_in_array(unresettable_controller,
590 			ARRAY_SIZE(unresettable_controller), board_id);
591 }
592 
593 static int ctlr_is_soft_resettable(u32 board_id)
594 {
595 	return !board_id_in_array(soft_unresettable_controller,
596 			ARRAY_SIZE(soft_unresettable_controller), board_id);
597 }
598 
599 static int ctlr_is_resettable(u32 board_id)
600 {
601 	return ctlr_is_hard_resettable(board_id) ||
602 		ctlr_is_soft_resettable(board_id);
603 }
604 
605 static int ctlr_needs_abort_tags_swizzled(u32 board_id)
606 {
607 	return board_id_in_array(needs_abort_tags_swizzled,
608 			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
609 }
610 
611 static ssize_t host_show_resettable(struct device *dev,
612 	struct device_attribute *attr, char *buf)
613 {
614 	struct ctlr_info *h;
615 	struct Scsi_Host *shost = class_to_shost(dev);
616 
617 	h = shost_to_hba(shost);
618 	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
619 }
620 
621 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
622 {
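	/* bits 7:6 of addr byte 3 select the addressing mode; 01b (0x40) marks a logical device */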
623 	return (scsi3addr[3] & 0xC0) == 0x40;
624 }
625 
626 static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
627 	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
628 };
629 #define HPSA_RAID_0	0
630 #define HPSA_RAID_4	1
631 #define HPSA_RAID_1	2	/* also used for RAID 10 */
632 #define HPSA_RAID_5	3	/* also used for RAID 50 */
633 #define HPSA_RAID_51	4
634 #define HPSA_RAID_6	5	/* also used for RAID 60 */
635 #define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
636 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
637 #define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
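/*
 * raid_label[] has nine entries, so RAID_UNKNOWN resolves to index 7
 * ("UNKNOWN") and PHYSICAL_DRIVE to index 8 ("PHYS DRV").
 */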
638 
639 static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
640 {
641 	return !device->physical_device;
642 }
643 
644 static ssize_t raid_level_show(struct device *dev,
645 	     struct device_attribute *attr, char *buf)
646 {
647 	ssize_t l = 0;
648 	unsigned char rlevel;
649 	struct ctlr_info *h;
650 	struct scsi_device *sdev;
651 	struct hpsa_scsi_dev_t *hdev;
652 	unsigned long flags;
653 
654 	sdev = to_scsi_device(dev);
655 	h = sdev_to_hba(sdev);
656 	spin_lock_irqsave(&h->lock, flags);
657 	hdev = sdev->hostdata;
658 	if (!hdev) {
659 		spin_unlock_irqrestore(&h->lock, flags);
660 		return -ENODEV;
661 	}
662 
663 	/* Is this even a logical drive? */
664 	if (!is_logical_device(hdev)) {
665 		spin_unlock_irqrestore(&h->lock, flags);
666 		l = snprintf(buf, PAGE_SIZE, "N/A\n");
667 		return l;
668 	}
669 
670 	rlevel = hdev->raid_level;
671 	spin_unlock_irqrestore(&h->lock, flags);
672 	if (rlevel > RAID_UNKNOWN)
673 		rlevel = RAID_UNKNOWN;
674 	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
675 	return l;
676 }
677 
678 static ssize_t lunid_show(struct device *dev,
679 	     struct device_attribute *attr, char *buf)
680 {
681 	struct ctlr_info *h;
682 	struct scsi_device *sdev;
683 	struct hpsa_scsi_dev_t *hdev;
684 	unsigned long flags;
685 	unsigned char lunid[8];
686 
687 	sdev = to_scsi_device(dev);
688 	h = sdev_to_hba(sdev);
689 	spin_lock_irqsave(&h->lock, flags);
690 	hdev = sdev->hostdata;
691 	if (!hdev) {
692 		spin_unlock_irqrestore(&h->lock, flags);
693 		return -ENODEV;
694 	}
695 	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
696 	spin_unlock_irqrestore(&h->lock, flags);
697 	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
698 		lunid[0], lunid[1], lunid[2], lunid[3],
699 		lunid[4], lunid[5], lunid[6], lunid[7]);
700 }
701 
702 static ssize_t unique_id_show(struct device *dev,
703 	     struct device_attribute *attr, char *buf)
704 {
705 	struct ctlr_info *h;
706 	struct scsi_device *sdev;
707 	struct hpsa_scsi_dev_t *hdev;
708 	unsigned long flags;
709 	unsigned char sn[16];
710 
711 	sdev = to_scsi_device(dev);
712 	h = sdev_to_hba(sdev);
713 	spin_lock_irqsave(&h->lock, flags);
714 	hdev = sdev->hostdata;
715 	if (!hdev) {
716 		spin_unlock_irqrestore(&h->lock, flags);
717 		return -ENODEV;
718 	}
719 	memcpy(sn, hdev->device_id, sizeof(sn));
720 	spin_unlock_irqrestore(&h->lock, flags);
721 	return snprintf(buf, 16 * 2 + 2,
722 			"%02X%02X%02X%02X%02X%02X%02X%02X"
723 			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
724 			sn[0], sn[1], sn[2], sn[3],
725 			sn[4], sn[5], sn[6], sn[7],
726 			sn[8], sn[9], sn[10], sn[11],
727 			sn[12], sn[13], sn[14], sn[15]);
728 }
729 
730 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
731 	     struct device_attribute *attr, char *buf)
732 {
733 	struct ctlr_info *h;
734 	struct scsi_device *sdev;
735 	struct hpsa_scsi_dev_t *hdev;
736 	unsigned long flags;
737 	int offload_enabled;
738 
739 	sdev = to_scsi_device(dev);
740 	h = sdev_to_hba(sdev);
741 	spin_lock_irqsave(&h->lock, flags);
742 	hdev = sdev->hostdata;
743 	if (!hdev) {
744 		spin_unlock_irqrestore(&h->lock, flags);
745 		return -ENODEV;
746 	}
747 	offload_enabled = hdev->offload_enabled;
748 	spin_unlock_irqrestore(&h->lock, flags);
749 	return snprintf(buf, 20, "%d\n", offload_enabled);
750 }
751 
752 #define MAX_PATHS 8
753 
754 static ssize_t path_info_show(struct device *dev,
755 	     struct device_attribute *attr, char *buf)
756 {
757 	struct ctlr_info *h;
758 	struct scsi_device *sdev;
759 	struct hpsa_scsi_dev_t *hdev;
760 	unsigned long flags;
761 	int i;
762 	int output_len = 0;
763 	u8 box;
764 	u8 bay;
765 	u8 path_map_index = 0;
766 	char *active;
767 	unsigned char phys_connector[2];
768 
769 	sdev = to_scsi_device(dev);
770 	h = sdev_to_hba(sdev);
771 	spin_lock_irqsave(&h->devlock, flags);
772 	hdev = sdev->hostdata;
773 	if (!hdev) {
774 		spin_unlock_irqrestore(&h->devlock, flags);
775 		return -ENODEV;
776 	}
777 
778 	bay = hdev->bay;
779 	for (i = 0; i < MAX_PATHS; i++) {
780 		path_map_index = 1<<i;
781 		if (i == hdev->active_path_index)
782 			active = "Active";
783 		else if (hdev->path_map & path_map_index)
784 			active = "Inactive";
785 		else
786 			continue;
787 
788 		output_len += scnprintf(buf + output_len,
789 				PAGE_SIZE - output_len,
790 				"[%d:%d:%d:%d] %20.20s ",
791 				h->scsi_host->host_no,
792 				hdev->bus, hdev->target, hdev->lun,
793 				scsi_device_type(hdev->devtype));
794 
795 		if (hdev->external ||
796 			hdev->devtype == TYPE_RAID ||
797 			is_logical_device(hdev)) {
798 			output_len += snprintf(buf + output_len,
799 						PAGE_SIZE - output_len,
800 						"%s\n", active);
801 			continue;
802 		}
803 
804 		box = hdev->box[i];
805 		memcpy(&phys_connector, &hdev->phys_connector[i],
806 			sizeof(phys_connector));
807 		if (phys_connector[0] < '0')
808 			phys_connector[0] = '0';
809 		if (phys_connector[1] < '0')
810 			phys_connector[1] = '0';
811 		if (hdev->phys_connector[i] > 0)
812 			output_len += snprintf(buf + output_len,
813 				PAGE_SIZE - output_len,
814 				"PORT: %.2s ",
815 				phys_connector);
816 		if (hdev->devtype == TYPE_DISK && hdev->expose_device) {
817 			if (box == 0 || box == 0xFF) {
818 				output_len += snprintf(buf + output_len,
819 					PAGE_SIZE - output_len,
820 					"BAY: %hhu %s\n",
821 					bay, active);
822 			} else {
823 				output_len += snprintf(buf + output_len,
824 					PAGE_SIZE - output_len,
825 					"BOX: %hhu BAY: %hhu %s\n",
826 					box, bay, active);
827 			}
828 		} else if (box != 0 && box != 0xFF) {
829 			output_len += snprintf(buf + output_len,
830 				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
831 				box, active);
832 		} else
833 			output_len += snprintf(buf + output_len,
834 				PAGE_SIZE - output_len, "%s\n", active);
835 	}
836 
837 	spin_unlock_irqrestore(&h->devlock, flags);
838 	return output_len;
839 }
840 
841 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
842 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
843 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
844 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
845 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
846 			host_show_hp_ssd_smart_path_enabled, NULL);
847 static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
848 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
849 		host_show_hp_ssd_smart_path_status,
850 		host_store_hp_ssd_smart_path_status);
851 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
852 			host_store_raid_offload_debug);
853 static DEVICE_ATTR(firmware_revision, S_IRUGO,
854 	host_show_firmware_revision, NULL);
855 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
856 	host_show_commands_outstanding, NULL);
857 static DEVICE_ATTR(transport_mode, S_IRUGO,
858 	host_show_transport_mode, NULL);
859 static DEVICE_ATTR(resettable, S_IRUGO,
860 	host_show_resettable, NULL);
861 static DEVICE_ATTR(lockup_detected, S_IRUGO,
862 	host_show_lockup_detected, NULL);
863 
864 static struct device_attribute *hpsa_sdev_attrs[] = {
865 	&dev_attr_raid_level,
866 	&dev_attr_lunid,
867 	&dev_attr_unique_id,
868 	&dev_attr_hp_ssd_smart_path_enabled,
869 	&dev_attr_path_info,
870 	NULL,
871 };
872 
873 static struct device_attribute *hpsa_shost_attrs[] = {
874 	&dev_attr_rescan,
875 	&dev_attr_firmware_revision,
876 	&dev_attr_commands_outstanding,
877 	&dev_attr_transport_mode,
878 	&dev_attr_resettable,
879 	&dev_attr_hp_ssd_smart_path_status,
880 	&dev_attr_raid_offload_debug,
881 	&dev_attr_lockup_detected,
882 	NULL,
883 };
884 
885 #define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
886 		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
887 
888 static struct scsi_host_template hpsa_driver_template = {
889 	.module			= THIS_MODULE,
890 	.name			= HPSA,
891 	.proc_name		= HPSA,
892 	.queuecommand		= hpsa_scsi_queue_command,
893 	.scan_start		= hpsa_scan_start,
894 	.scan_finished		= hpsa_scan_finished,
895 	.change_queue_depth	= hpsa_change_queue_depth,
896 	.this_id		= -1,
897 	.use_clustering		= ENABLE_CLUSTERING,
898 	.eh_abort_handler	= hpsa_eh_abort_handler,
899 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
900 	.ioctl			= hpsa_ioctl,
901 	.slave_alloc		= hpsa_slave_alloc,
902 	.slave_configure	= hpsa_slave_configure,
903 	.slave_destroy		= hpsa_slave_destroy,
904 #ifdef CONFIG_COMPAT
905 	.compat_ioctl		= hpsa_compat_ioctl,
906 #endif
907 	.sdev_attrs = hpsa_sdev_attrs,
908 	.shost_attrs = hpsa_shost_attrs,
909 	.max_sectors = 8192,
910 	.no_write_same = 1,
911 };
912 
913 static inline u32 next_command(struct ctlr_info *h, u8 q)
914 {
915 	u32 a;
916 	struct reply_queue_buffer *rq = &h->reply_queue[q];
917 
918 	if (h->transMethod & CFGTBL_Trans_io_accel1)
919 		return h->access.command_completed(h, q);
920 
921 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
922 		return h->access.command_completed(h, q);
923 
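	/*
	 * Bit 0 of each reply-queue entry toggles on every pass around the
	 * ring; it matches rq->wraparound only for newly posted completions,
	 * so stale entries from the previous pass are never consumed.
	 */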
924 	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
925 		a = rq->head[rq->current_entry];
926 		rq->current_entry++;
927 		atomic_dec(&h->commands_outstanding);
928 	} else {
929 		a = FIFO_EMPTY;
930 	}
931 	/* Check for wraparound */
932 	if (rq->current_entry == h->max_commands) {
933 		rq->current_entry = 0;
934 		rq->wraparound ^= 1;
935 	}
936 	return a;
937 }
938 
939 /*
940  * There are some special bits in the bus address of the
941  * command that we have to set for the controller to know
942  * how to process the command:
943  *
944  * Normal performant mode:
945  * bit 0: 1 means performant mode, 0 means simple mode.
946  * bits 1-3 = block fetch table entry
947  * bits 4-6 = command type (== 0)
948  *
949  * ioaccel1 mode:
950  * bit 0 = "performant mode" bit.
951  * bits 1-3 = block fetch table entry
952  * bits 4-6 = command type (== 110)
953  * (command type is needed because ioaccel1 mode
954  * commands are submitted through the same register as normal
955  * mode commands, so this is how the controller knows whether
956  * the command is normal mode or ioaccel1 mode.)
957  *
958  * ioaccel2 mode:
959  * bit 0 = "performant mode" bit.
960  * bits 1-4 = block fetch table entry (note extra bit)
961  * bits 4-6 = not needed, because ioaccel2 mode has
962  * a separate special register for submitting commands.
963  */
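/*
 * Worked example for normal performant mode: a command whose SG count maps to
 * block fetch table entry 2 gets busaddr |= 1 | (2 << 1), so the low bits read
 * 0x5 -- performant-mode bit set, fetch entry 2, command type 0.
 */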
964 
965 /*
966  * set_performant_mode: Modify the tag for cciss performant mode:
967  * set bit 0 for the pull model, bits 3-1 for the block fetch
968  * register number.
969  */
970 #define DEFAULT_REPLY_QUEUE (-1)
971 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
972 					int reply_queue)
973 {
974 	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
975 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
976 		if (unlikely(!h->msix_vector))
977 			return;
978 		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
979 			c->Header.ReplyQueue =
980 				raw_smp_processor_id() % h->nreply_queues;
981 		else
982 			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
983 	}
984 }
985 
986 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
987 						struct CommandList *c,
988 						int reply_queue)
989 {
990 	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
991 
992 	/*
993 	 * Tell the controller to post the reply to the queue for this
994 	 * processor.  This seems to give the best I/O throughput.
995 	 */
996 	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
997 		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
998 	else
999 		cp->ReplyQueue = reply_queue % h->nreply_queues;
1000 	/*
1001 	 * Set the bits in the address sent down to include:
1002 	 *  - performant mode bit (bit 0)
1003 	 *  - pull count (bits 1-3)
1004 	 *  - command type (bits 4-6)
1005 	 */
1006 	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1007 					IOACCEL1_BUSADDR_CMDTYPE;
1008 }
1009 
1010 static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1011 						struct CommandList *c,
1012 						int reply_queue)
1013 {
1014 	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1015 		&h->ioaccel2_cmd_pool[c->cmdindex];
1016 
1017 	/* Tell the controller to post the reply to the queue for this
1018 	 * processor.  This seems to give the best I/O throughput.
1019 	 */
1020 	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1021 		cp->reply_queue = smp_processor_id() % h->nreply_queues;
1022 	else
1023 		cp->reply_queue = reply_queue % h->nreply_queues;
1024 	/* Set the bits in the address sent down to include:
1025 	 *  - performant mode bit not used in ioaccel mode 2
1026 	 *  - pull count (bits 0-3)
1027 	 *  - command type isn't needed for ioaccel2
1028 	 */
1029 	c->busaddr |= h->ioaccel2_blockFetchTable[0];
1030 }
1031 
1032 static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1033 						struct CommandList *c,
1034 						int reply_queue)
1035 {
1036 	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1037 
1038 	/*
1039 	 * Tell the controller to post the reply to the queue for this
1040 	 * processor.  This seems to give the best I/O throughput.
1041 	 */
1042 	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1043 		cp->reply_queue = smp_processor_id() % h->nreply_queues;
1044 	else
1045 		cp->reply_queue = reply_queue % h->nreply_queues;
1046 	/*
1047 	 * Set the bits in the address sent down to include:
1048 	 *  - performant mode bit not used in ioaccel mode 2
1049 	 *  - pull count (bits 0-3)
1050 	 *  - command type isn't needed for ioaccel2
1051 	 */
1052 	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1053 }
1054 
1055 static int is_firmware_flash_cmd(u8 *cdb)
1056 {
1057 	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1058 }
1059 
1060 /*
1061  * During firmware flash, the heartbeat register may not update as frequently
1062  * as it should.  So we dial down lockup detection during firmware flash and
1063  * dial it back up when firmware flash completes.
1064  */
1065 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1066 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1067 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1068 		struct CommandList *c)
1069 {
1070 	if (!is_firmware_flash_cmd(c->Request.CDB))
1071 		return;
1072 	atomic_inc(&h->firmware_flash_in_progress);
1073 	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1074 }
1075 
1076 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1077 		struct CommandList *c)
1078 {
1079 	if (is_firmware_flash_cmd(c->Request.CDB) &&
1080 		atomic_dec_and_test(&h->firmware_flash_in_progress))
1081 		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1082 }
1083 
1084 static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1085 	struct CommandList *c, int reply_queue)
1086 {
1087 	dial_down_lockup_detection_during_fw_flash(h, c);
1088 	atomic_inc(&h->commands_outstanding);
1089 	switch (c->cmd_type) {
1090 	case CMD_IOACCEL1:
1091 		set_ioaccel1_performant_mode(h, c, reply_queue);
1092 		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1093 		break;
1094 	case CMD_IOACCEL2:
1095 		set_ioaccel2_performant_mode(h, c, reply_queue);
1096 		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1097 		break;
1098 	case IOACCEL2_TMF:
1099 		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1100 		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1101 		break;
1102 	default:
1103 		set_performant_mode(h, c, reply_queue);
1104 		h->access.submit_command(h, c);
1105 	}
1106 }
1107 
1108 static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1109 {
1110 	if (unlikely(hpsa_is_pending_event(c)))
1111 		return finish_cmd(c);
1112 
1113 	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1114 }
1115 
1116 static inline int is_hba_lunid(unsigned char scsi3addr[])
1117 {
1118 	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1119 }
1120 
1121 static inline int is_scsi_rev_5(struct ctlr_info *h)
1122 {
1123 	if (!h->hba_inquiry_data)
1124 		return 0;
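	/* the low 3 bits of INQUIRY byte 2 carry the ANSI SCSI version; 5 means SPC-3 */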
1125 	if ((h->hba_inquiry_data[2] & 0x07) == 5)
1126 		return 1;
1127 	return 0;
1128 }
1129 
1130 static int hpsa_find_target_lun(struct ctlr_info *h,
1131 	unsigned char scsi3addr[], int bus, int *target, int *lun)
1132 {
1133 	/* finds an unused bus, target, lun for a new physical device
1134 	 * assumes h->devlock is held
1135 	 */
1136 	int i, found = 0;
1137 	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1138 
1139 	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1140 
1141 	for (i = 0; i < h->ndevices; i++) {
1142 		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1143 			__set_bit(h->dev[i]->target, lun_taken);
1144 	}
1145 
1146 	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1147 	if (i < HPSA_MAX_DEVICES) {
1148 		/* *bus = 1; */
1149 		*target = i;
1150 		*lun = 0;
1151 		found = 1;
1152 	}
1153 	return !found;
1154 }
1155 
1156 static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1157 	struct hpsa_scsi_dev_t *dev, char *description)
1158 {
1159 #define LABEL_SIZE 25
1160 	char label[LABEL_SIZE];
1161 
1162 	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1163 		return;
1164 
1165 	switch (dev->devtype) {
1166 	case TYPE_RAID:
1167 		snprintf(label, LABEL_SIZE, "controller");
1168 		break;
1169 	case TYPE_ENCLOSURE:
1170 		snprintf(label, LABEL_SIZE, "enclosure");
1171 		break;
1172 	case TYPE_DISK:
1173 		if (dev->external)
1174 			snprintf(label, LABEL_SIZE, "external");
1175 		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1176 			snprintf(label, LABEL_SIZE, "%s",
1177 				raid_label[PHYSICAL_DRIVE]);
1178 		else
1179 			snprintf(label, LABEL_SIZE, "RAID-%s",
1180 				dev->raid_level > RAID_UNKNOWN ? "?" :
1181 				raid_label[dev->raid_level]);
1182 		break;
1183 	case TYPE_ROM:
1184 		snprintf(label, LABEL_SIZE, "rom");
1185 		break;
1186 	case TYPE_TAPE:
1187 		snprintf(label, LABEL_SIZE, "tape");
1188 		break;
1189 	case TYPE_MEDIUM_CHANGER:
1190 		snprintf(label, LABEL_SIZE, "changer");
1191 		break;
1192 	default:
1193 		snprintf(label, LABEL_SIZE, "UNKNOWN");
1194 		break;
1195 	}
1196 
1197 	dev_printk(level, &h->pdev->dev,
1198 			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1199 			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1200 			description,
1201 			scsi_device_type(dev->devtype),
1202 			dev->vendor,
1203 			dev->model,
1204 			label,
1205 			dev->offload_config ? '+' : '-',
1206 			dev->offload_enabled ? '+' : '-',
1207 			dev->expose_device);
1208 }
1209 
1210 /* Add an entry into h->dev[] array. */
1211 static int hpsa_scsi_add_entry(struct ctlr_info *h,
1212 		struct hpsa_scsi_dev_t *device,
1213 		struct hpsa_scsi_dev_t *added[], int *nadded)
1214 {
1215 	/* assumes h->devlock is held */
1216 	int n = h->ndevices;
1217 	int i;
1218 	unsigned char addr1[8], addr2[8];
1219 	struct hpsa_scsi_dev_t *sd;
1220 
1221 	if (n >= HPSA_MAX_DEVICES) {
1222 		dev_err(&h->pdev->dev, "too many devices, some will be "
1223 			"inaccessible.\n");
1224 		return -1;
1225 	}
1226 
1227 	/* physical devices do not have lun or target assigned until now. */
1228 	if (device->lun != -1)
1229 		/* Logical device, lun is already assigned. */
1230 		goto lun_assigned;
1231 
1232 	/* If this device is a non-zero lun of a multi-lun device,
1233 	 * byte 4 of the 8-byte LUN addr will contain the logical
1234 	 * unit number, zero otherwise.
1235 	 */
1236 	if (device->scsi3addr[4] == 0) {
1237 		/* This is not a non-zero lun of a multi-lun device */
1238 		if (hpsa_find_target_lun(h, device->scsi3addr,
1239 			device->bus, &device->target, &device->lun) != 0)
1240 			return -1;
1241 		goto lun_assigned;
1242 	}
1243 
1244 	/* This is a non-zero lun of a multi-lun device.
1245 	 * Search through our list and find the device which
1246 	 * has the same 8-byte LUN address, except for bytes 4 and 5.
1247 	 * Assign the same bus and target for this new LUN.
1248 	 * Use the logical unit number from the firmware.
1249 	 */
1250 	memcpy(addr1, device->scsi3addr, 8);
1251 	addr1[4] = 0;
1252 	addr1[5] = 0;
1253 	for (i = 0; i < n; i++) {
1254 		sd = h->dev[i];
1255 		memcpy(addr2, sd->scsi3addr, 8);
1256 		addr2[4] = 0;
1257 		addr2[5] = 0;
1258 		/* differ only in bytes 4 and 5? */
1259 		if (memcmp(addr1, addr2, 8) == 0) {
1260 			device->bus = sd->bus;
1261 			device->target = sd->target;
1262 			device->lun = device->scsi3addr[4];
1263 			break;
1264 		}
1265 	}
1266 	if (device->lun == -1) {
1267 		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1268 			" suspect firmware bug or unsupported hardware "
1269 			"configuration.\n");
1270 		return -1;
1271 	}
1272 
1273 lun_assigned:
1274 
1275 	h->dev[n] = device;
1276 	h->ndevices++;
1277 	added[*nadded] = device;
1278 	(*nadded)++;
1279 	hpsa_show_dev_msg(KERN_INFO, h, device,
1280 		device->expose_device ? "added" : "masked");
1281 	device->offload_to_be_enabled = device->offload_enabled;
1282 	device->offload_enabled = 0;
1283 	return 0;
1284 }
1285 
1286 /* Update an entry in h->dev[] array. */
1287 static void hpsa_scsi_update_entry(struct ctlr_info *h,
1288 	int entry, struct hpsa_scsi_dev_t *new_entry)
1289 {
1290 	int offload_enabled;
1291 	/* assumes h->devlock is held */
1292 	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1293 
1294 	/* Raid level changed. */
1295 	h->dev[entry]->raid_level = new_entry->raid_level;
1296 
1297 	/* Raid offload parameters changed.  Careful about the ordering. */
1298 	if (new_entry->offload_config && new_entry->offload_enabled) {
1299 		/*
1300 		 * if drive is newly offload_enabled, we want to copy the
1301 		 * raid map data first.  If previously offload_enabled and
1302 		 * offload_config were set, raid map data had better be
1303 		 * the same as it was before.  If raid map data is changed
1304 		 * then it had better be the case that
1305 		 * h->dev[entry]->offload_enabled is currently 0.
1306 		 */
1307 		h->dev[entry]->raid_map = new_entry->raid_map;
1308 		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1309 	}
1310 	if (new_entry->hba_ioaccel_enabled) {
1311 		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1312 		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1313 	}
1314 	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1315 	h->dev[entry]->offload_config = new_entry->offload_config;
1316 	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1317 	h->dev[entry]->queue_depth = new_entry->queue_depth;
1318 
1319 	/*
1320 	 * We can turn off ioaccel offload now, but we have to delay turning
1321 	 * it back on until h->dev[entry]->phys_disk[] can be updated, which
1322 	 * can't happen until all the devices have been updated.
1323 	 */
1324 	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1325 	if (!new_entry->offload_enabled)
1326 		h->dev[entry]->offload_enabled = 0;
1327 
1328 	offload_enabled = h->dev[entry]->offload_enabled;
1329 	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
1330 	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1331 	h->dev[entry]->offload_enabled = offload_enabled;
1332 }
1333 
1334 /* Replace an entry from h->dev[] array. */
1335 static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1336 	int entry, struct hpsa_scsi_dev_t *new_entry,
1337 	struct hpsa_scsi_dev_t *added[], int *nadded,
1338 	struct hpsa_scsi_dev_t *removed[], int *nremoved)
1339 {
1340 	/* assumes h->devlock is held */
1341 	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1342 	removed[*nremoved] = h->dev[entry];
1343 	(*nremoved)++;
1344 
1345 	/*
1346 	 * New physical devices won't have target/lun assigned yet
1347 	 * so we need to preserve the values in the slot we are replacing.
1348 	 */
1349 	if (new_entry->target == -1) {
1350 		new_entry->target = h->dev[entry]->target;
1351 		new_entry->lun = h->dev[entry]->lun;
1352 	}
1353 
1354 	h->dev[entry] = new_entry;
1355 	added[*nadded] = new_entry;
1356 	(*nadded)++;
1357 	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1358 	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1359 	new_entry->offload_enabled = 0;
1360 }
1361 
1362 /* Remove an entry from h->dev[] array. */
1363 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1364 	struct hpsa_scsi_dev_t *removed[], int *nremoved)
1365 {
1366 	/* assumes h->devlock is held */
1367 	int i;
1368 	struct hpsa_scsi_dev_t *sd;
1369 
1370 	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1371 
1372 	sd = h->dev[entry];
1373 	removed[*nremoved] = h->dev[entry];
1374 	(*nremoved)++;
1375 
1376 	for (i = entry; i < h->ndevices-1; i++)
1377 		h->dev[i] = h->dev[i+1];
1378 	h->ndevices--;
1379 	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1380 }
1381 
1382 #define SCSI3ADDR_EQ(a, b) ( \
1383 	(a)[7] == (b)[7] && \
1384 	(a)[6] == (b)[6] && \
1385 	(a)[5] == (b)[5] && \
1386 	(a)[4] == (b)[4] && \
1387 	(a)[3] == (b)[3] && \
1388 	(a)[2] == (b)[2] && \
1389 	(a)[1] == (b)[1] && \
1390 	(a)[0] == (b)[0])
1391 
1392 static void fixup_botched_add(struct ctlr_info *h,
1393 	struct hpsa_scsi_dev_t *added)
1394 {
1395 	/* called when scsi_add_device fails in order to re-adjust
1396 	 * h->dev[] to match the mid layer's view.
1397 	 */
1398 	unsigned long flags;
1399 	int i, j;
1400 
1401 	spin_lock_irqsave(&h->lock, flags);
1402 	for (i = 0; i < h->ndevices; i++) {
1403 		if (h->dev[i] == added) {
1404 			for (j = i; j < h->ndevices-1; j++)
1405 				h->dev[j] = h->dev[j+1];
1406 			h->ndevices--;
1407 			break;
1408 		}
1409 	}
1410 	spin_unlock_irqrestore(&h->lock, flags);
1411 	kfree(added);
1412 }
1413 
1414 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1415 	struct hpsa_scsi_dev_t *dev2)
1416 {
1417 	/* we compare everything except lun and target as these
1418 	 * are not yet assigned.  Compare parts likely
1419 	 * to differ first
1420 	 */
1421 	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1422 		sizeof(dev1->scsi3addr)) != 0)
1423 		return 0;
1424 	if (memcmp(dev1->device_id, dev2->device_id,
1425 		sizeof(dev1->device_id)) != 0)
1426 		return 0;
1427 	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1428 		return 0;
1429 	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1430 		return 0;
1431 	if (dev1->devtype != dev2->devtype)
1432 		return 0;
1433 	if (dev1->bus != dev2->bus)
1434 		return 0;
1435 	return 1;
1436 }
1437 
1438 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1439 	struct hpsa_scsi_dev_t *dev2)
1440 {
1441 	/* Device attributes that can change, but don't mean
1442 	 * that the device is a different device, nor that the OS
1443 	 * needs to be told anything about the change.
1444 	 */
1445 	if (dev1->raid_level != dev2->raid_level)
1446 		return 1;
1447 	if (dev1->offload_config != dev2->offload_config)
1448 		return 1;
1449 	if (dev1->offload_enabled != dev2->offload_enabled)
1450 		return 1;
1451 	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1452 		if (dev1->queue_depth != dev2->queue_depth)
1453 			return 1;
1454 	return 0;
1455 }
1456 
1457 /* Find needle in haystack.  If exact match found, return DEVICE_SAME,
1458  * and return needle location in *index.  If scsi3addr matches, but not
1459  * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1460  * location in *index.
1461  * In the case of a minor device attribute change, such as RAID level, just
1462  * return DEVICE_UPDATED, along with the updated device's location in index.
1463  * If needle not found, return DEVICE_NOT_FOUND.
1464  */
1465 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1466 	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1467 	int *index)
1468 {
1469 	int i;
1470 #define DEVICE_NOT_FOUND 0
1471 #define DEVICE_CHANGED 1
1472 #define DEVICE_SAME 2
1473 #define DEVICE_UPDATED 3
1474 	if (needle == NULL)
1475 		return DEVICE_NOT_FOUND;
1476 
1477 	for (i = 0; i < haystack_size; i++) {
1478 		if (haystack[i] == NULL) /* previously removed. */
1479 			continue;
1480 		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1481 			*index = i;
1482 			if (device_is_the_same(needle, haystack[i])) {
1483 				if (device_updated(needle, haystack[i]))
1484 					return DEVICE_UPDATED;
1485 				return DEVICE_SAME;
1486 			} else {
1487 				/* Keep offline devices offline */
1488 				if (needle->volume_offline)
1489 					return DEVICE_NOT_FOUND;
1490 				return DEVICE_CHANGED;
1491 			}
1492 		}
1493 	}
1494 	*index = -1;
1495 	return DEVICE_NOT_FOUND;
1496 }
1497 
1498 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1499 					unsigned char scsi3addr[])
1500 {
1501 	struct offline_device_entry *device;
1502 	unsigned long flags;
1503 
1504 	/* Check to see if device is already on the list */
1505 	spin_lock_irqsave(&h->offline_device_lock, flags);
1506 	list_for_each_entry(device, &h->offline_device_list, offline_list) {
1507 		if (memcmp(device->scsi3addr, scsi3addr,
1508 			sizeof(device->scsi3addr)) == 0) {
1509 			spin_unlock_irqrestore(&h->offline_device_lock, flags);
1510 			return;
1511 		}
1512 	}
1513 	spin_unlock_irqrestore(&h->offline_device_lock, flags);
1514 
1515 	/* Device is not on the list, add it. */
1516 	device = kmalloc(sizeof(*device), GFP_KERNEL);
1517 	if (!device) {
1518 		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1519 		return;
1520 	}
1521 	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1522 	spin_lock_irqsave(&h->offline_device_lock, flags);
1523 	list_add_tail(&device->offline_list, &h->offline_device_list);
1524 	spin_unlock_irqrestore(&h->offline_device_lock, flags);
1525 }
1526 
1527 /* Print a message explaining various offline volume states */
1528 static void hpsa_show_volume_status(struct ctlr_info *h,
1529 	struct hpsa_scsi_dev_t *sd)
1530 {
1531 	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1532 		dev_info(&h->pdev->dev,
1533 			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1534 			h->scsi_host->host_no,
1535 			sd->bus, sd->target, sd->lun);
1536 	switch (sd->volume_offline) {
1537 	case HPSA_LV_OK:
1538 		break;
1539 	case HPSA_LV_UNDERGOING_ERASE:
1540 		dev_info(&h->pdev->dev,
1541 			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1542 			h->scsi_host->host_no,
1543 			sd->bus, sd->target, sd->lun);
1544 		break;
1545 	case HPSA_LV_NOT_AVAILABLE:
1546 		dev_info(&h->pdev->dev,
1547 			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1548 			h->scsi_host->host_no,
1549 			sd->bus, sd->target, sd->lun);
1550 		break;
1551 	case HPSA_LV_UNDERGOING_RPI:
1552 		dev_info(&h->pdev->dev,
1553 			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1554 			h->scsi_host->host_no,
1555 			sd->bus, sd->target, sd->lun);
1556 		break;
1557 	case HPSA_LV_PENDING_RPI:
1558 		dev_info(&h->pdev->dev,
1559 			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1560 			h->scsi_host->host_no,
1561 			sd->bus, sd->target, sd->lun);
1562 		break;
1563 	case HPSA_LV_ENCRYPTED_NO_KEY:
1564 		dev_info(&h->pdev->dev,
1565 			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1566 			h->scsi_host->host_no,
1567 			sd->bus, sd->target, sd->lun);
1568 		break;
1569 	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1570 		dev_info(&h->pdev->dev,
1571 			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1572 			h->scsi_host->host_no,
1573 			sd->bus, sd->target, sd->lun);
1574 		break;
1575 	case HPSA_LV_UNDERGOING_ENCRYPTION:
1576 		dev_info(&h->pdev->dev,
1577 			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1578 			h->scsi_host->host_no,
1579 			sd->bus, sd->target, sd->lun);
1580 		break;
1581 	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1582 		dev_info(&h->pdev->dev,
1583 			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1584 			h->scsi_host->host_no,
1585 			sd->bus, sd->target, sd->lun);
1586 		break;
1587 	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1588 		dev_info(&h->pdev->dev,
1589 			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1590 			h->scsi_host->host_no,
1591 			sd->bus, sd->target, sd->lun);
1592 		break;
1593 	case HPSA_LV_PENDING_ENCRYPTION:
1594 		dev_info(&h->pdev->dev,
1595 			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1596 			h->scsi_host->host_no,
1597 			sd->bus, sd->target, sd->lun);
1598 		break;
1599 	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1600 		dev_info(&h->pdev->dev,
1601 			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1602 			h->scsi_host->host_no,
1603 			sd->bus, sd->target, sd->lun);
1604 		break;
1605 	}
1606 }
1607 
1608 /*
1609  * Figure the list of physical drive pointers for a logical drive with
1610  * raid offload configured.
1611  */
1612 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1613 				struct hpsa_scsi_dev_t *dev[], int ndevices,
1614 				struct hpsa_scsi_dev_t *logical_drive)
1615 {
1616 	struct raid_map_data *map = &logical_drive->raid_map;
1617 	struct raid_map_disk_data *dd = &map->data[0];
1618 	int i, j;
1619 	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1620 				le16_to_cpu(map->metadata_disks_per_row);
1621 	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1622 				le16_to_cpu(map->layout_map_count) *
1623 				total_disks_per_row;
1624 	int nphys_disk = le16_to_cpu(map->layout_map_count) *
1625 				total_disks_per_row;
1626 	int qdepth;
1627 
1628 	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1629 		nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1630 
1631 	logical_drive->nphysical_disks = nraid_map_entries;
1632 
1633 	qdepth = 0;
1634 	for (i = 0; i < nraid_map_entries; i++) {
1635 		logical_drive->phys_disk[i] = NULL;
1636 		if (!logical_drive->offload_config)
1637 			continue;
1638 		for (j = 0; j < ndevices; j++) {
1639 			if (dev[j] == NULL)
1640 				continue;
1641 			if (dev[j]->devtype != TYPE_DISK)
1642 				continue;
1643 			if (is_logical_device(dev[j]))
1644 				continue;
1645 			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1646 				continue;
1647 
1648 			logical_drive->phys_disk[i] = dev[j];
1649 			if (i < nphys_disk)
1650 				qdepth = min(h->nr_cmds, qdepth +
1651 				    logical_drive->phys_disk[i]->queue_depth);
1652 			break;
1653 		}
1654 
1655 		/*
1656 		 * This can happen if a physical drive is removed and
1657 		 * the logical drive is degraded.  In that case, the RAID
1658 		 * map data will refer to a physical disk which isn't actually
1659 		 * present.  And in that case offload_enabled should already
1660 		 * be 0, but we'll turn it off here just in case
1661 		 */
1662 		if (!logical_drive->phys_disk[i]) {
1663 			logical_drive->offload_enabled = 0;
1664 			logical_drive->offload_to_be_enabled = 0;
1665 			logical_drive->queue_depth = 8;
1666 		}
1667 	}
1668 	if (nraid_map_entries)
1669 		/*
1670 		 * This is correct for reads, too high for full stripe writes,
1671 		 * way too high for partial stripe writes
1672 		 */
1673 		logical_drive->queue_depth = qdepth;
1674 	else
1675 		logical_drive->queue_depth = h->nr_cmds;
1676 }
1677 
1678 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1679 				struct hpsa_scsi_dev_t *dev[], int ndevices)
1680 {
1681 	int i;
1682 
1683 	for (i = 0; i < ndevices; i++) {
1684 		if (dev[i] == NULL)
1685 			continue;
1686 		if (dev[i]->devtype != TYPE_DISK)
1687 			continue;
1688 		if (!is_logical_device(dev[i]))
1689 			continue;
1690 
1691 		/*
1692 		 * If offload is currently enabled, the RAID map and
1693 		 * phys_disk[] assignment *better* not be changing
1694 		 * and since it isn't changing, we do not need to
1695 		 * update it.
1696 		 */
1697 		if (dev[i]->offload_enabled)
1698 			continue;
1699 
1700 		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1701 	}
1702 }
1703 
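/*
 * Expose a device to the SCSI mid layer: logical (RAID) devices are
 * registered with scsi_add_device(), physical (HBA mode) devices go
 * through the SAS transport layer.
 */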
1704 static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1705 {
1706 	int rc = 0;
1707 
1708 	if (!h->scsi_host)
1709 		return 1;
1710 
1711 	if (is_logical_device(device)) /* RAID */
1712 		rc = scsi_add_device(h->scsi_host, device->bus,
1713 					device->target, device->lun);
1714 	else /* HBA */
1715 		rc = hpsa_add_sas_device(h->sas_host, device);
1716 
1717 	return rc;
1718 }
1719 
1720 static void hpsa_remove_device(struct ctlr_info *h,
1721 			struct hpsa_scsi_dev_t *device)
1722 {
1723 	struct scsi_device *sdev = NULL;
1724 
1725 	if (!h->scsi_host)
1726 		return;
1727 
1728 	if (is_logical_device(device)) { /* RAID */
1729 		sdev = scsi_device_lookup(h->scsi_host, device->bus,
1730 						device->target, device->lun);
1731 		if (sdev) {
1732 			scsi_remove_device(sdev);
1733 			scsi_device_put(sdev);
1734 		} else {
1735 			/*
1736 			 * We don't expect to get here.  Future commands
1737 			 * to this device will get a selection timeout as
1738 			 * if the device were gone.
1739 			 */
1740 			hpsa_show_dev_msg(KERN_WARNING, h, device,
1741 					"didn't find device for removal.");
1742 		}
1743 	} else /* HBA */
1744 		hpsa_remove_sas_device(device);
1745 }
1746 
1747 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1748 	struct hpsa_scsi_dev_t *sd[], int nsds)
1749 {
1750 	/* sd contains scsi3 addresses and devtypes, and inquiry
1751 	 * data.  This function takes what's in sd to be the current
1752 	 * reality and updates h->dev[] to reflect that reality.
1753 	 */
1754 	int i, entry, device_change, changes = 0;
1755 	struct hpsa_scsi_dev_t *csd;
1756 	unsigned long flags;
1757 	struct hpsa_scsi_dev_t **added, **removed;
1758 	int nadded, nremoved;
1759 
1760 	/*
1761 	 * A reset can cause a device status to change;
1762 	 * re-schedule the scan to see what happened.
1763 	 */
1764 	if (h->reset_in_progress) {
1765 		h->drv_req_rescan = 1;
1766 		return;
1767 	}
1768 
1769 	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1770 	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1771 
1772 	if (!added || !removed) {
1773 		dev_warn(&h->pdev->dev, "out of memory in "
1774 			"adjust_hpsa_scsi_table\n");
1775 		goto free_and_out;
1776 	}
1777 
1778 	spin_lock_irqsave(&h->devlock, flags);
1779 
1780 	/* find any devices in h->dev[] that are not in
1781 	 * sd[] and remove them from h->dev[], and for any
1782 	 * devices which have changed, remove the old device
1783 	 * info and add the new device info.
1784 	 * If minor device attributes change, just update
1785 	 * the existing device structure.
1786 	 */
1787 	i = 0;
1788 	nremoved = 0;
1789 	nadded = 0;
1790 	while (i < h->ndevices) {
1791 		csd = h->dev[i];
1792 		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1793 		if (device_change == DEVICE_NOT_FOUND) {
1794 			changes++;
1795 			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1796 			continue; /* remove ^^^, hence i not incremented */
1797 		} else if (device_change == DEVICE_CHANGED) {
1798 			changes++;
1799 			hpsa_scsi_replace_entry(h, i, sd[entry],
1800 				added, &nadded, removed, &nremoved);
1801 			/* Set it to NULL to prevent it from being freed
1802 			 * at the bottom of hpsa_update_scsi_devices()
1803 			 */
1804 			sd[entry] = NULL;
1805 		} else if (device_change == DEVICE_UPDATED) {
1806 			hpsa_scsi_update_entry(h, i, sd[entry]);
1807 		}
1808 		i++;
1809 	}
1810 
1811 	/* Now, make sure every device listed in sd[] is also
1812 	 * listed in h->dev[], adding them if they aren't found
1813 	 */
1814 
1815 	for (i = 0; i < nsds; i++) {
1816 		if (!sd[i]) /* if already added above. */
1817 			continue;
1818 
1819 		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1820 		 * as the SCSI mid-layer does not handle such devices well.
1821 		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1822 		 * at 160Hz, and prevents the system from coming up.
1823 		 */
1824 		if (sd[i]->volume_offline) {
1825 			hpsa_show_volume_status(h, sd[i]);
1826 			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1827 			continue;
1828 		}
1829 
1830 		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1831 					h->ndevices, &entry);
1832 		if (device_change == DEVICE_NOT_FOUND) {
1833 			changes++;
1834 			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1835 				break;
1836 			sd[i] = NULL; /* prevent from being freed later. */
1837 		} else if (device_change == DEVICE_CHANGED) {
1838 			/* should never happen... */
1839 			changes++;
1840 			dev_warn(&h->pdev->dev,
1841 				"device unexpectedly changed.\n");
1842 			/* but if it does happen, we just ignore that device */
1843 		}
1844 	}
1845 	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1846 
1847 	/* Now that h->dev[]->phys_disk[] is coherent, we can enable
1848 	 * any logical drives that need it enabled.
1849 	 */
1850 	for (i = 0; i < h->ndevices; i++) {
1851 		if (h->dev[i] == NULL)
1852 			continue;
1853 		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1854 	}
1855 
1856 	spin_unlock_irqrestore(&h->devlock, flags);
1857 
1858 	/* Monitor devices which are in one of several NOT READY states to be
1859 	 * brought online later. This must be done without holding h->devlock,
1860 	 * so don't touch h->dev[]
1861 	 */
1862 	for (i = 0; i < nsds; i++) {
1863 		if (!sd[i]) /* if already added above. */
1864 			continue;
1865 		if (sd[i]->volume_offline)
1866 			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1867 	}
1868 
1869 	/* Don't notify the scsi mid layer of any changes the first time
1870 	 * through, or if there are no changes; on the first pass,
1871 	 * scsi_scan_host will do it later.
1872 	 */
1873 	if (!changes)
1874 		goto free_and_out;
1875 
1876 	/* Notify scsi mid layer of any removed devices */
1877 	for (i = 0; i < nremoved; i++) {
1878 		if (removed[i] == NULL)
1879 			continue;
1880 		if (removed[i]->expose_device)
1881 			hpsa_remove_device(h, removed[i]);
1882 		kfree(removed[i]);
1883 		removed[i] = NULL;
1884 	}
1885 
1886 	/* Notify scsi mid layer of any added devices */
1887 	for (i = 0; i < nadded; i++) {
1888 		int rc = 0;
1889 
1890 		if (added[i] == NULL)
1891 			continue;
1892 		if (!(added[i]->expose_device))
1893 			continue;
1894 		rc = hpsa_add_device(h, added[i]);
1895 		if (!rc)
1896 			continue;
1897 		dev_warn(&h->pdev->dev,
1898 			"addition failed %d, device not added.", rc);
1899 		/* now we have to remove it from h->dev,
1900 		 * since it didn't get added to scsi mid layer
1901 		 */
1902 		fixup_botched_add(h, added[i]);
1903 		h->drv_req_rescan = 1;
1904 	}
1905 
1906 free_and_out:
1907 	kfree(added);
1908 	kfree(removed);
1909 }
1910 
1911 /*
1912  * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
1913  * Assumes h->devlock is held.
1914  */
1915 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1916 	int bus, int target, int lun)
1917 {
1918 	int i;
1919 	struct hpsa_scsi_dev_t *sd;
1920 
1921 	for (i = 0; i < h->ndevices; i++) {
1922 		sd = h->dev[i];
1923 		if (sd->bus == bus && sd->target == target && sd->lun == lun)
1924 			return sd;
1925 	}
1926 	return NULL;
1927 }
1928 
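/*
 * slave_alloc hook: look up our hpsa_scsi_dev_t for this scsi_device
 * (by SAS rphy for physical devices, by bus/target/lun for logical
 * ones) and stash it in sdev->hostdata for later use.
 */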
1929 static int hpsa_slave_alloc(struct scsi_device *sdev)
1930 {
1931 	struct hpsa_scsi_dev_t *sd;
1932 	unsigned long flags;
1933 	struct ctlr_info *h;
1934 
1935 	h = sdev_to_hba(sdev);
1936 	spin_lock_irqsave(&h->devlock, flags);
1937 	if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
1938 		struct scsi_target *starget;
1939 		struct sas_rphy *rphy;
1940 
1941 		starget = scsi_target(sdev);
1942 		rphy = target_to_rphy(starget);
1943 		sd = hpsa_find_device_by_sas_rphy(h, rphy);
1944 		if (sd) {
1945 			sd->target = sdev_id(sdev);
1946 			sd->lun = sdev->lun;
1947 		}
1948 	} else
1949 		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1950 					sdev_id(sdev), sdev->lun);
1951 
1952 	if (sd && sd->expose_device) {
1953 		atomic_set(&sd->ioaccel_cmds_out, 0);
1954 		sdev->hostdata = sd;
1955 	} else
1956 		sdev->hostdata = NULL;
1957 	spin_unlock_irqrestore(&h->devlock, flags);
1958 	return 0;
1959 }
1960 
1961 /* configure scsi device based on internal per-device structure */
1962 static int hpsa_slave_configure(struct scsi_device *sdev)
1963 {
1964 	struct hpsa_scsi_dev_t *sd;
1965 	int queue_depth;
1966 
1967 	sd = sdev->hostdata;
1968 	sdev->no_uld_attach = !sd || !sd->expose_device;
1969 
1970 	if (sd)
1971 		queue_depth = sd->queue_depth != 0 ?
1972 			sd->queue_depth : sdev->host->can_queue;
1973 	else
1974 		queue_depth = sdev->host->can_queue;
1975 
1976 	scsi_change_queue_depth(sdev, queue_depth);
1977 
1978 	return 0;
1979 }
1980 
1981 static void hpsa_slave_destroy(struct scsi_device *sdev)
1982 {
1983 	/* nothing to do. */
1984 }
1985 
1986 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1987 {
1988 	int i;
1989 
1990 	if (!h->ioaccel2_cmd_sg_list)
1991 		return;
1992 	for (i = 0; i < h->nr_cmds; i++) {
1993 		kfree(h->ioaccel2_cmd_sg_list[i]);
1994 		h->ioaccel2_cmd_sg_list[i] = NULL;
1995 	}
1996 	kfree(h->ioaccel2_cmd_sg_list);
1997 	h->ioaccel2_cmd_sg_list = NULL;
1998 }
1999 
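/*
 * Pre-allocate one ioaccel2 SG chain block per command, so commands
 * whose scatter-gather lists don't fit in the ioaccel2 command frame
 * can spill into a separately DMA-mapped chain block.
 */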
2000 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2001 {
2002 	int i;
2003 
2004 	if (h->chainsize <= 0)
2005 		return 0;
2006 
2007 	h->ioaccel2_cmd_sg_list =
2008 		kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
2009 					GFP_KERNEL);
2010 	if (!h->ioaccel2_cmd_sg_list)
2011 		return -ENOMEM;
2012 	for (i = 0; i < h->nr_cmds; i++) {
2013 		h->ioaccel2_cmd_sg_list[i] =
2014 			kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
2015 					h->maxsgentries, GFP_KERNEL);
2016 		if (!h->ioaccel2_cmd_sg_list[i])
2017 			goto clean;
2018 	}
2019 	return 0;
2020 
2021 clean:
2022 	hpsa_free_ioaccel2_sg_chain_blocks(h);
2023 	return -ENOMEM;
2024 }
2025 
2026 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2027 {
2028 	int i;
2029 
2030 	if (!h->cmd_sg_list)
2031 		return;
2032 	for (i = 0; i < h->nr_cmds; i++) {
2033 		kfree(h->cmd_sg_list[i]);
2034 		h->cmd_sg_list[i] = NULL;
2035 	}
2036 	kfree(h->cmd_sg_list);
2037 	h->cmd_sg_list = NULL;
2038 }
2039 
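/*
 * Likewise, pre-allocate a chain block of h->chainsize SG descriptors
 * per command for the normal (RAID path) command format.
 */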
2040 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2041 {
2042 	int i;
2043 
2044 	if (h->chainsize <= 0)
2045 		return 0;
2046 
2047 	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
2048 				GFP_KERNEL);
2049 	if (!h->cmd_sg_list) {
2050 		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
2051 		return -ENOMEM;
2052 	}
2053 	for (i = 0; i < h->nr_cmds; i++) {
2054 		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
2055 						h->chainsize, GFP_KERNEL);
2056 		if (!h->cmd_sg_list[i]) {
2057 			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
2058 			goto clean;
2059 		}
2060 	}
2061 	return 0;
2062 
2063 clean:
2064 	hpsa_free_sg_chain_blocks(h);
2065 	return -ENOMEM;
2066 }
2067 
2068 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2069 	struct io_accel2_cmd *cp, struct CommandList *c)
2070 {
2071 	struct ioaccel2_sg_element *chain_block;
2072 	u64 temp64;
2073 	u32 chain_size;
2074 
2075 	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2076 	chain_size = le32_to_cpu(cp->sg[0].length);
2077 	temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2078 				PCI_DMA_TODEVICE);
2079 	if (dma_mapping_error(&h->pdev->dev, temp64)) {
2080 		/* prevent subsequent unmapping */
2081 		cp->sg->address = 0;
2082 		return -1;
2083 	}
2084 	cp->sg->address = cpu_to_le64(temp64);
2085 	return 0;
2086 }
2087 
2088 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2089 	struct io_accel2_cmd *cp)
2090 {
2091 	struct ioaccel2_sg_element *chain_sg;
2092 	u64 temp64;
2093 	u32 chain_size;
2094 
2095 	chain_sg = cp->sg;
2096 	temp64 = le64_to_cpu(chain_sg->address);
2097 	chain_size = le32_to_cpu(cp->sg[0].length);
2098 	pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
2099 }
2100 
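/*
 * When a command needs more SG entries than fit in the CommandList,
 * turn the last embedded SG descriptor into a chain pointer that
 * references the command's pre-allocated chain block.
 */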
2101 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2102 	struct CommandList *c)
2103 {
2104 	struct SGDescriptor *chain_sg, *chain_block;
2105 	u64 temp64;
2106 	u32 chain_len;
2107 
2108 	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2109 	chain_block = h->cmd_sg_list[c->cmdindex];
2110 	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2111 	chain_len = sizeof(*chain_sg) *
2112 		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2113 	chain_sg->Len = cpu_to_le32(chain_len);
2114 	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2115 				PCI_DMA_TODEVICE);
2116 	if (dma_mapping_error(&h->pdev->dev, temp64)) {
2117 		/* prevent subsequent unmapping */
2118 		chain_sg->Addr = cpu_to_le64(0);
2119 		return -1;
2120 	}
2121 	chain_sg->Addr = cpu_to_le64(temp64);
2122 	return 0;
2123 }
2124 
2125 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2126 	struct CommandList *c)
2127 {
2128 	struct SGDescriptor *chain_sg;
2129 
2130 	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2131 		return;
2132 
2133 	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2134 	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2135 			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
2136 }
2137 
2138 
2139 /* Decode the various types of errors on ioaccel2 path.
2140  * Return 1 for any error that should generate a RAID path retry.
2141  * Return 0 for errors that don't require a RAID path retry.
2142  */
2143 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2144 					struct CommandList *c,
2145 					struct scsi_cmnd *cmd,
2146 					struct io_accel2_cmd *c2)
2147 {
2148 	int data_len;
2149 	int retry = 0;
2150 	u32 ioaccel2_resid = 0;
2151 
2152 	switch (c2->error_data.serv_response) {
2153 	case IOACCEL2_SERV_RESPONSE_COMPLETE:
2154 		switch (c2->error_data.status) {
2155 		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2156 			if (cmd)
2157 				cmd->result = 0;
2158 			break;
2159 		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2160 			cmd->result |= SAM_STAT_CHECK_CONDITION;
2161 			if (c2->error_data.data_present !=
2162 					IOACCEL2_SENSE_DATA_PRESENT) {
2163 				memset(cmd->sense_buffer, 0,
2164 					SCSI_SENSE_BUFFERSIZE);
2165 				break;
2166 			}
2167 			/* copy the sense data */
2168 			data_len = c2->error_data.sense_data_len;
2169 			if (data_len > SCSI_SENSE_BUFFERSIZE)
2170 				data_len = SCSI_SENSE_BUFFERSIZE;
2171 			if (data_len > sizeof(c2->error_data.sense_data_buff))
2172 				data_len =
2173 					sizeof(c2->error_data.sense_data_buff);
2174 			memcpy(cmd->sense_buffer,
2175 				c2->error_data.sense_data_buff, data_len);
2176 			retry = 1;
2177 			break;
2178 		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2179 			retry = 1;
2180 			break;
2181 		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2182 			retry = 1;
2183 			break;
2184 		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2185 			retry = 1;
2186 			break;
2187 		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2188 			retry = 1;
2189 			break;
2190 		default:
2191 			retry = 1;
2192 			break;
2193 		}
2194 		break;
2195 	case IOACCEL2_SERV_RESPONSE_FAILURE:
2196 		switch (c2->error_data.status) {
2197 		case IOACCEL2_STATUS_SR_IO_ERROR:
2198 		case IOACCEL2_STATUS_SR_IO_ABORTED:
2199 		case IOACCEL2_STATUS_SR_OVERRUN:
2200 			retry = 1;
2201 			break;
2202 		case IOACCEL2_STATUS_SR_UNDERRUN:
2203 			cmd->result = (DID_OK << 16);		/* host byte */
2204 			cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
2205 			ioaccel2_resid = get_unaligned_le32(
2206 						&c2->error_data.resid_cnt[0]);
2207 			scsi_set_resid(cmd, ioaccel2_resid);
2208 			break;
2209 		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2210 		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2211 		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2212 			/* We will get an event from ctlr to trigger rescan */
2213 			retry = 1;
2214 			break;
2215 		default:
2216 			retry = 1;
2217 		}
2218 		break;
2219 	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2220 		break;
2221 	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2222 		break;
2223 	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2224 		retry = 1;
2225 		break;
2226 	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2227 		break;
2228 	default:
2229 		retry = 1;
2230 		break;
2231 	}
2232 
2233 	return retry;	/* retry on raid path? */
2234 }
2235 
2236 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2237 		struct CommandList *c)
2238 {
2239 	bool do_wake = false;
2240 
2241 	/*
2242 	 * Prevent the following race in the abort handler:
2243 	 *
2244 	 * 1. LLD is requested to abort a SCSI command
2245 	 * 2. The SCSI command completes
2246 	 * 3. The struct CommandList associated with step 2 is made available
2247 	 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2248 	 * 5. Abort handler follows scsi_cmnd->host_scribble and
2249  *    finds struct CommandList and tries to abort it
2250 	 * Now we have aborted the wrong command.
2251 	 *
2252 	 * Reset c->scsi_cmd here so that the abort or reset handler will know
2253 	 * this command has completed.  Then, check to see if the handler is
2254 	 * waiting for this command, and, if so, wake it.
2255 	 */
2256 	c->scsi_cmd = SCSI_CMD_IDLE;
2257 	mb();	/* Declare command idle before checking for pending events. */
2258 	if (c->abort_pending) {
2259 		do_wake = true;
2260 		c->abort_pending = false;
2261 	}
2262 	if (c->reset_pending) {
2263 		unsigned long flags;
2264 		struct hpsa_scsi_dev_t *dev;
2265 
2266 		/*
2267 		 * There appears to be a reset pending; lock the lock and
2268 		 * reconfirm.  If so, then decrement the count of outstanding
2269 		 * commands and wake the reset command if this is the last one.
2270 		 */
2271 		spin_lock_irqsave(&h->lock, flags);
2272 		dev = c->reset_pending;		/* Re-fetch under the lock. */
2273 		if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2274 			do_wake = true;
2275 		c->reset_pending = NULL;
2276 		spin_unlock_irqrestore(&h->lock, flags);
2277 	}
2278 
2279 	if (do_wake)
2280 		wake_up_all(&h->event_sync_wait_queue);
2281 }
2282 
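/*
 * Resolve any pending abort/reset bookkeeping for a completed command,
 * then return it to the tag-based command pool.
 */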
2283 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2284 				      struct CommandList *c)
2285 {
2286 	hpsa_cmd_resolve_events(h, c);
2287 	cmd_tagged_free(h, c);
2288 }
2289 
2290 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2291 		struct CommandList *c, struct scsi_cmnd *cmd)
2292 {
2293 	hpsa_cmd_resolve_and_free(h, c);
2294 	cmd->scsi_done(cmd);
2295 }
2296 
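/* Resubmit the command down the normal RAID path via the resubmit workqueue. */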
2297 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2298 {
2299 	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2300 	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2301 }
2302 
2303 static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2304 {
2305 	cmd->result = DID_ABORT << 16;
2306 }
2307 
2308 static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2309 				    struct scsi_cmnd *cmd)
2310 {
2311 	hpsa_set_scsi_cmd_aborted(cmd);
2312 	dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2313 			 c->Request.CDB, c->err_info->ScsiStatus);
2314 	hpsa_cmd_resolve_and_free(h, c);
2315 }
2316 
2317 static void process_ioaccel2_completion(struct ctlr_info *h,
2318 		struct CommandList *c, struct scsi_cmnd *cmd,
2319 		struct hpsa_scsi_dev_t *dev)
2320 {
2321 	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2322 
2323 	/* check for good status */
2324 	if (likely(c2->error_data.serv_response == 0 &&
2325 			c2->error_data.status == 0)) {
2326 		cmd->result = 0;
2327 		return hpsa_cmd_free_and_done(h, c, cmd);
2328 	}
2329 
2330 	/*
2331 	 * Any RAID offload error results in retry which will use
2332 	 * the normal I/O path so the controller can handle whatever's
2333 	 * wrong.
2334 	 */
2335 	if (is_logical_device(dev) &&
2336 		c2->error_data.serv_response ==
2337 			IOACCEL2_SERV_RESPONSE_FAILURE) {
2338 		if (c2->error_data.status ==
2339 			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
2340 			dev->offload_enabled = 0;
2341 
2342 		return hpsa_retry_cmd(h, c);
2343 	}
2344 
2345 	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
2346 		return hpsa_retry_cmd(h, c);
2347 
2348 	return hpsa_cmd_free_and_done(h, c, cmd);
2349 }
2350 
2351 /* Returns 0 on success, < 0 otherwise. */
2352 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2353 					struct CommandList *cp)
2354 {
2355 	u8 tmf_status = cp->err_info->ScsiStatus;
2356 
2357 	switch (tmf_status) {
2358 	case CISS_TMF_COMPLETE:
2359 		/*
2360 		 * CISS_TMF_COMPLETE never happens; instead,
2361 		 * ei->CommandStatus == 0 for this case.
2362 		 */
2363 	case CISS_TMF_SUCCESS:
2364 		return 0;
2365 	case CISS_TMF_INVALID_FRAME:
2366 	case CISS_TMF_NOT_SUPPORTED:
2367 	case CISS_TMF_FAILED:
2368 	case CISS_TMF_WRONG_LUN:
2369 	case CISS_TMF_OVERLAPPED_TAG:
2370 		break;
2371 	default:
2372 		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2373 				tmf_status);
2374 		break;
2375 	}
2376 	return -tmf_status;
2377 }
2378 
2379 static void complete_scsi_command(struct CommandList *cp)
2380 {
2381 	struct scsi_cmnd *cmd;
2382 	struct ctlr_info *h;
2383 	struct ErrorInfo *ei;
2384 	struct hpsa_scsi_dev_t *dev;
2385 	struct io_accel2_cmd *c2;
2386 
2387 	u8 sense_key;
2388 	u8 asc;      /* additional sense code */
2389 	u8 ascq;     /* additional sense code qualifier */
2390 	unsigned long sense_data_size;
2391 
2392 	ei = cp->err_info;
2393 	cmd = cp->scsi_cmd;
2394 	h = cp->h;
2395 	dev = cmd->device->hostdata;
2396 	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2397 
2398 	scsi_dma_unmap(cmd); /* undo the DMA mappings */
2399 	if ((cp->cmd_type == CMD_SCSI) &&
2400 		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2401 		hpsa_unmap_sg_chain_block(h, cp);
2402 
2403 	if ((cp->cmd_type == CMD_IOACCEL2) &&
2404 		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2405 		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2406 
2407 	cmd->result = (DID_OK << 16); 		/* host byte */
2408 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
2409 
2410 	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2411 		atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2412 
2413 	/*
2414 	 * We check for lockup status here as it may be set for
2415 	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2416 	 * fail_all_outstanding_cmds()
2417 	 */
2418 	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2419 		/* DID_NO_CONNECT will prevent a retry */
2420 		cmd->result = DID_NO_CONNECT << 16;
2421 		return hpsa_cmd_free_and_done(h, cp, cmd);
2422 	}
2423 
2424 	if ((unlikely(hpsa_is_pending_event(cp)))) {
2425 		if (cp->reset_pending)
2426 			return hpsa_cmd_resolve_and_free(h, cp);
2427 		if (cp->abort_pending)
2428 			return hpsa_cmd_abort_and_free(h, cp, cmd);
2429 	}
2430 
2431 	if (cp->cmd_type == CMD_IOACCEL2)
2432 		return process_ioaccel2_completion(h, cp, cmd, dev);
2433 
2434 	scsi_set_resid(cmd, ei->ResidualCnt);
2435 	if (ei->CommandStatus == 0)
2436 		return hpsa_cmd_free_and_done(h, cp, cmd);
2437 
2438 	/* For I/O accelerator commands, copy over some fields to the normal
2439 	 * CISS header used below for error handling.
2440 	 */
2441 	if (cp->cmd_type == CMD_IOACCEL1) {
2442 		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2443 		cp->Header.SGList = scsi_sg_count(cmd);
2444 		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2445 		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2446 			IOACCEL1_IOFLAGS_CDBLEN_MASK;
2447 		cp->Header.tag = c->tag;
2448 		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2449 		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2450 
2451 		/* Any RAID offload error results in retry which will use
2452 		 * the normal I/O path so the controller can handle whatever's
2453 		 * wrong.
2454 		 */
2455 		if (is_logical_device(dev)) {
2456 			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2457 				dev->offload_enabled = 0;
2458 			return hpsa_retry_cmd(h, cp);
2459 		}
2460 	}
2461 
2462 	/* an error has occurred */
2463 	switch (ei->CommandStatus) {
2464 
2465 	case CMD_TARGET_STATUS:
2466 		cmd->result |= ei->ScsiStatus;
2467 		/* copy the sense data */
2468 		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2469 			sense_data_size = SCSI_SENSE_BUFFERSIZE;
2470 		else
2471 			sense_data_size = sizeof(ei->SenseInfo);
2472 		if (ei->SenseLen < sense_data_size)
2473 			sense_data_size = ei->SenseLen;
2474 		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2475 		if (ei->ScsiStatus)
2476 			decode_sense_data(ei->SenseInfo, sense_data_size,
2477 				&sense_key, &asc, &ascq);
2478 		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2479 			if (sense_key == ABORTED_COMMAND) {
2480 				cmd->result |= DID_SOFT_ERROR << 16;
2481 				break;
2482 			}
2483 			break;
2484 		}
2485 		/* Problem was not a check condition
2486 		 * Pass it up to the upper layers...
2487 		 */
2488 		if (ei->ScsiStatus) {
2489 			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2490 				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2491 				"Returning result: 0x%x\n",
2492 				cp, ei->ScsiStatus,
2493 				sense_key, asc, ascq,
2494 				cmd->result);
2495 		} else {  /* scsi status is zero??? How??? */
2496 			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2497 				"Returning no connection.\n", cp),
2498 
2499 			/* Ordinarily, this case should never happen,
2500 			 * but there is a bug in some released firmware
2501 			 * revisions that allows it to happen if, for
2502 			 * example, a 4100 backplane loses power and
2503 			 * the tape drive is in it.  We assume that
2504 			 * it's a fatal error of some kind because we
2505 			 * can't show that it wasn't. We will make it
2506 			 * look like selection timeout since that is
2507 			 * the most common reason for this to occur,
2508 			 * and it's severe enough.
2509 			 */
2510 
2511 			cmd->result = DID_NO_CONNECT << 16;
2512 		}
2513 		break;
2514 
2515 	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2516 		break;
2517 	case CMD_DATA_OVERRUN:
2518 		dev_warn(&h->pdev->dev,
2519 			"CDB %16phN data overrun\n", cp->Request.CDB);
2520 		break;
2521 	case CMD_INVALID: {
2522 		/* print_bytes(cp, sizeof(*cp), 1, 0);
2523 		print_cmd(cp); */
2524 		/* We get CMD_INVALID if you address a non-existent device
2525 		 * instead of a selection timeout (no response).  You will
2526 		 * see this if you yank out a drive, then try to access it.
2527 		 * This is kind of a shame because it means that any other
2528 		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2529 		 * missing target. */
2530 		cmd->result = DID_NO_CONNECT << 16;
2531 	}
2532 		break;
2533 	case CMD_PROTOCOL_ERR:
2534 		cmd->result = DID_ERROR << 16;
2535 		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2536 				cp->Request.CDB);
2537 		break;
2538 	case CMD_HARDWARE_ERR:
2539 		cmd->result = DID_ERROR << 16;
2540 		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2541 			cp->Request.CDB);
2542 		break;
2543 	case CMD_CONNECTION_LOST:
2544 		cmd->result = DID_ERROR << 16;
2545 		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2546 			cp->Request.CDB);
2547 		break;
2548 	case CMD_ABORTED:
2549 		/* Return now to avoid calling scsi_done(). */
2550 		return hpsa_cmd_abort_and_free(h, cp, cmd);
2551 	case CMD_ABORT_FAILED:
2552 		cmd->result = DID_ERROR << 16;
2553 		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2554 			cp->Request.CDB);
2555 		break;
2556 	case CMD_UNSOLICITED_ABORT:
2557 		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2558 		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2559 			cp->Request.CDB);
2560 		break;
2561 	case CMD_TIMEOUT:
2562 		cmd->result = DID_TIME_OUT << 16;
2563 		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2564 			cp->Request.CDB);
2565 		break;
2566 	case CMD_UNABORTABLE:
2567 		cmd->result = DID_ERROR << 16;
2568 		dev_warn(&h->pdev->dev, "Command unabortable\n");
2569 		break;
2570 	case CMD_TMF_STATUS:
2571 		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2572 			cmd->result = DID_ERROR << 16;
2573 		break;
2574 	case CMD_IOACCEL_DISABLED:
2575 		/* This only handles the direct pass-through case since RAID
2576 		 * offload is handled above.  Just attempt a retry.
2577 		 */
2578 		cmd->result = DID_SOFT_ERROR << 16;
2579 		dev_warn(&h->pdev->dev,
2580 				"cp %p had HP SSD Smart Path error\n", cp);
2581 		break;
2582 	default:
2583 		cmd->result = DID_ERROR << 16;
2584 		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2585 				cp, ei->CommandStatus);
2586 	}
2587 
2588 	return hpsa_cmd_free_and_done(h, cp, cmd);
2589 }
2590 
2591 static void hpsa_pci_unmap(struct pci_dev *pdev,
2592 	struct CommandList *c, int sg_used, int data_direction)
2593 {
2594 	int i;
2595 
2596 	for (i = 0; i < sg_used; i++)
2597 		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2598 				le32_to_cpu(c->SG[i].Len),
2599 				data_direction);
2600 }
2601 
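/*
 * DMA-map a single contiguous buffer and describe it in the command's
 * first (and only) SG descriptor.
 */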
2602 static int hpsa_map_one(struct pci_dev *pdev,
2603 		struct CommandList *cp,
2604 		unsigned char *buf,
2605 		size_t buflen,
2606 		int data_direction)
2607 {
2608 	u64 addr64;
2609 
2610 	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2611 		cp->Header.SGList = 0;
2612 		cp->Header.SGTotal = cpu_to_le16(0);
2613 		return 0;
2614 	}
2615 
2616 	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2617 	if (dma_mapping_error(&pdev->dev, addr64)) {
2618 		/* Prevent subsequent unmap of something never mapped */
2619 		cp->Header.SGList = 0;
2620 		cp->Header.SGTotal = cpu_to_le16(0);
2621 		return -1;
2622 	}
2623 	cp->SG[0].Addr = cpu_to_le64(addr64);
2624 	cp->SG[0].Len = cpu_to_le32(buflen);
2625 	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2626 	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
2627 	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2628 	return 0;
2629 }
2630 
2631 #define NO_TIMEOUT ((unsigned long) -1)
2632 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
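/*
 * Submit a command and wait for it to complete, optionally bounded by
 * timeout_msecs.
 */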
2633 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2634 	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2635 {
2636 	DECLARE_COMPLETION_ONSTACK(wait);
2637 
2638 	c->waiting = &wait;
2639 	__enqueue_cmd_and_start_io(h, c, reply_queue);
2640 	if (timeout_msecs == NO_TIMEOUT) {
2641 		/* TODO: get rid of this no-timeout thing */
2642 		wait_for_completion_io(&wait);
2643 		return IO_OK;
2644 	}
2645 	if (!wait_for_completion_io_timeout(&wait,
2646 					msecs_to_jiffies(timeout_msecs))) {
2647 		dev_warn(&h->pdev->dev, "Command timed out.\n");
2648 		return -ETIMEDOUT;
2649 	}
2650 	return IO_OK;
2651 }
2652 
2653 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2654 				   int reply_queue, unsigned long timeout_msecs)
2655 {
2656 	if (unlikely(lockup_detected(h))) {
2657 		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2658 		return IO_OK;
2659 	}
2660 	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2661 }
2662 
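/* Read this CPU's copy of the per-cpu lockup_detected flag. */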
2663 static u32 lockup_detected(struct ctlr_info *h)
2664 {
2665 	int cpu;
2666 	u32 rc, *lockup_detected;
2667 
2668 	cpu = get_cpu();
2669 	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2670 	rc = *lockup_detected;
2671 	put_cpu();
2672 	return rc;
2673 }
2674 
2675 #define MAX_DRIVER_CMD_RETRIES 25
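/*
 * Retry a command as long as it keeps returning unit attention or busy,
 * backing off exponentially between attempts and giving up after
 * MAX_DRIVER_CMD_RETRIES tries.
 */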
2676 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2677 	struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2678 {
2679 	int backoff_time = 10, retry_count = 0;
2680 	int rc;
2681 
2682 	do {
2683 		memset(c->err_info, 0, sizeof(*c->err_info));
2684 		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2685 						  timeout_msecs);
2686 		if (rc)
2687 			break;
2688 		retry_count++;
2689 		if (retry_count > 3) {
2690 			msleep(backoff_time);
2691 			if (backoff_time < 1000)
2692 				backoff_time *= 2;
2693 		}
2694 	} while ((check_for_unit_attention(h, c) ||
2695 			check_for_busy(h, c)) &&
2696 			retry_count <= MAX_DRIVER_CMD_RETRIES);
2697 	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2698 	if (retry_count > MAX_DRIVER_CMD_RETRIES)
2699 		rc = -EIO;
2700 	return rc;
2701 }
2702 
2703 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2704 				struct CommandList *c)
2705 {
2706 	const u8 *cdb = c->Request.CDB;
2707 	const u8 *lun = c->Header.LUN.LunAddrBytes;
2708 
2709 	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2710 	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2711 		txt, lun[0], lun[1], lun[2], lun[3],
2712 		lun[4], lun[5], lun[6], lun[7],
2713 		cdb[0], cdb[1], cdb[2], cdb[3],
2714 		cdb[4], cdb[5], cdb[6], cdb[7],
2715 		cdb[8], cdb[9], cdb[10], cdb[11],
2716 		cdb[12], cdb[13], cdb[14], cdb[15]);
2717 }
2718 
2719 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2720 			struct CommandList *cp)
2721 {
2722 	const struct ErrorInfo *ei = cp->err_info;
2723 	struct device *d = &cp->h->pdev->dev;
2724 	u8 sense_key, asc, ascq;
2725 	int sense_len;
2726 
2727 	switch (ei->CommandStatus) {
2728 	case CMD_TARGET_STATUS:
2729 		if (ei->SenseLen > sizeof(ei->SenseInfo))
2730 			sense_len = sizeof(ei->SenseInfo);
2731 		else
2732 			sense_len = ei->SenseLen;
2733 		decode_sense_data(ei->SenseInfo, sense_len,
2734 					&sense_key, &asc, &ascq);
2735 		hpsa_print_cmd(h, "SCSI status", cp);
2736 		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2737 			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2738 				sense_key, asc, ascq);
2739 		else
2740 			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2741 		if (ei->ScsiStatus == 0)
2742 			dev_warn(d, "SCSI status is abnormally zero.  "
2743 			"(probably indicates selection timeout "
2744 			"reported incorrectly due to a known "
2745 			"firmware bug, circa July, 2001.)\n");
2746 		break;
2747 	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2748 		break;
2749 	case CMD_DATA_OVERRUN:
2750 		hpsa_print_cmd(h, "overrun condition", cp);
2751 		break;
2752 	case CMD_INVALID: {
2753 		/* controller unfortunately reports SCSI passthru's
2754 		 * to non-existent targets as invalid commands.
2755 		 */
2756 		hpsa_print_cmd(h, "invalid command", cp);
2757 		dev_warn(d, "probably means device no longer present\n");
2758 		}
2759 		break;
2760 	case CMD_PROTOCOL_ERR:
2761 		hpsa_print_cmd(h, "protocol error", cp);
2762 		break;
2763 	case CMD_HARDWARE_ERR:
2764 		hpsa_print_cmd(h, "hardware error", cp);
2765 		break;
2766 	case CMD_CONNECTION_LOST:
2767 		hpsa_print_cmd(h, "connection lost", cp);
2768 		break;
2769 	case CMD_ABORTED:
2770 		hpsa_print_cmd(h, "aborted", cp);
2771 		break;
2772 	case CMD_ABORT_FAILED:
2773 		hpsa_print_cmd(h, "abort failed", cp);
2774 		break;
2775 	case CMD_UNSOLICITED_ABORT:
2776 		hpsa_print_cmd(h, "unsolicited abort", cp);
2777 		break;
2778 	case CMD_TIMEOUT:
2779 		hpsa_print_cmd(h, "timed out", cp);
2780 		break;
2781 	case CMD_UNABORTABLE:
2782 		hpsa_print_cmd(h, "unabortable", cp);
2783 		break;
2784 	case CMD_CTLR_LOCKUP:
2785 		hpsa_print_cmd(h, "controller lockup detected", cp);
2786 		break;
2787 	default:
2788 		hpsa_print_cmd(h, "unknown status", cp);
2789 		dev_warn(d, "Unknown command status %x\n",
2790 				ei->CommandStatus);
2791 	}
2792 }
2793 
2794 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2795 			u16 page, unsigned char *buf,
2796 			unsigned char bufsize)
2797 {
2798 	int rc = IO_OK;
2799 	struct CommandList *c;
2800 	struct ErrorInfo *ei;
2801 
2802 	c = cmd_alloc(h);
2803 
2804 	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2805 			page, scsi3addr, TYPE_CMD)) {
2806 		rc = -1;
2807 		goto out;
2808 	}
2809 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2810 					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2811 	if (rc)
2812 		goto out;
2813 	ei = c->err_info;
2814 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2815 		hpsa_scsi_interpret_error(h, c);
2816 		rc = -1;
2817 	}
2818 out:
2819 	cmd_free(h, c);
2820 	return rc;
2821 }
2822 
2823 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2824 	u8 reset_type, int reply_queue)
2825 {
2826 	int rc = IO_OK;
2827 	struct CommandList *c;
2828 	struct ErrorInfo *ei;
2829 
2830 	c = cmd_alloc(h);
2831 
2832 
2833 	/* fill_cmd can't fail here, no data buffer to map. */
2834 	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2835 			scsi3addr, TYPE_MSG);
2836 	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2837 	if (rc) {
2838 		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2839 		goto out;
2840 	}
2841 	/* no unmap needed here because no data xfer. */
2842 
2843 	ei = c->err_info;
2844 	if (ei->CommandStatus != 0) {
2845 		hpsa_scsi_interpret_error(h, c);
2846 		rc = -1;
2847 	}
2848 out:
2849 	cmd_free(h, c);
2850 	return rc;
2851 }
2852 
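/*
 * Decide whether an outstanding command is addressed to the given
 * device; the reset path uses this to count commands that must drain
 * before a reset is considered complete.
 */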
2853 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2854 			       struct hpsa_scsi_dev_t *dev,
2855 			       unsigned char *scsi3addr)
2856 {
2857 	int i;
2858 	bool match = false;
2859 	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2860 	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2861 
2862 	if (hpsa_is_cmd_idle(c))
2863 		return false;
2864 
2865 	switch (c->cmd_type) {
2866 	case CMD_SCSI:
2867 	case CMD_IOCTL_PEND:
2868 		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2869 				sizeof(c->Header.LUN.LunAddrBytes));
2870 		break;
2871 
2872 	case CMD_IOACCEL1:
2873 	case CMD_IOACCEL2:
2874 		if (c->phys_disk == dev) {
2875 			/* HBA mode match */
2876 			match = true;
2877 		} else {
2878 			/* Possible RAID mode -- check each phys dev. */
2879 			/* FIXME:  Do we need to take out a lock here?  If
2880 			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
2881 			 * instead. */
2882 			for (i = 0; i < dev->nphysical_disks && !match; i++) {
2883 				/* FIXME: an alternate test might be
2884 				 *
2885 				 * match = dev->phys_disk[i]->ioaccel_handle
2886 				 *              == c2->scsi_nexus;      */
2887 				match = dev->phys_disk[i] == c->phys_disk;
2888 			}
2889 		}
2890 		break;
2891 
2892 	case IOACCEL2_TMF:
2893 		for (i = 0; i < dev->nphysical_disks && !match; i++) {
2894 			match = dev->phys_disk[i]->ioaccel_handle ==
2895 					le32_to_cpu(ac->it_nexus);
2896 		}
2897 		break;
2898 
2899 	case 0:		/* The command is in the middle of being initialized. */
2900 		match = false;
2901 		break;
2902 
2903 	default:
2904 		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
2905 			c->cmd_type);
2906 		BUG();
2907 	}
2908 
2909 	return match;
2910 }
2911 
2912 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
2913 	unsigned char *scsi3addr, u8 reset_type, int reply_queue)
2914 {
2915 	int i;
2916 	int rc = 0;
2917 
2918 	/* We can really only handle one reset at a time */
2919 	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
2920 		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
2921 		return -EINTR;
2922 	}
2923 
2924 	BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
2925 
2926 	for (i = 0; i < h->nr_cmds; i++) {
2927 		struct CommandList *c = h->cmd_pool + i;
2928 		int refcount = atomic_inc_return(&c->refcount);
2929 
2930 		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
2931 			unsigned long flags;
2932 
2933 			/*
2934 			 * Mark the target command as having a reset pending,
2935 			 * then take h->lock so that the command cannot complete
2936 			 * while we're considering it.  If the command is not
2937 			 * idle then count it; otherwise revoke the event.
2938 			 */
2939 			c->reset_pending = dev;
2940 			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
2941 			if (!hpsa_is_cmd_idle(c))
2942 				atomic_inc(&dev->reset_cmds_out);
2943 			else
2944 				c->reset_pending = NULL;
2945 			spin_unlock_irqrestore(&h->lock, flags);
2946 		}
2947 
2948 		cmd_free(h, c);
2949 	}
2950 
2951 	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
2952 	if (!rc)
2953 		wait_event(h->event_sync_wait_queue,
2954 			atomic_read(&dev->reset_cmds_out) == 0 ||
2955 			lockup_detected(h));
2956 
2957 	if (unlikely(lockup_detected(h))) {
2958 		dev_warn(&h->pdev->dev,
2959 			 "Controller lockup detected during reset wait\n");
2960 		rc = -ENODEV;
2961 	}
2962 
2963 	if (unlikely(rc))
2964 		atomic_set(&dev->reset_cmds_out, 0);
2965 
2966 	mutex_unlock(&h->reset_mutex);
2967 	return rc;
2968 }
2969 
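/* Read the RAID level from vendor-specific VPD page 0xC1 (byte 8). */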
2970 static void hpsa_get_raid_level(struct ctlr_info *h,
2971 	unsigned char *scsi3addr, unsigned char *raid_level)
2972 {
2973 	int rc;
2974 	unsigned char *buf;
2975 
2976 	*raid_level = RAID_UNKNOWN;
2977 	buf = kzalloc(64, GFP_KERNEL);
2978 	if (!buf)
2979 		return;
2980 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2981 	if (rc == 0)
2982 		*raid_level = buf[8];
2983 	if (*raid_level > RAID_UNKNOWN)
2984 		*raid_level = RAID_UNKNOWN;
2985 	kfree(buf);
2986 	return;
2987 }
2988 
2989 #define HPSA_MAP_DEBUG
2990 #ifdef HPSA_MAP_DEBUG
2991 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2992 				struct raid_map_data *map_buff)
2993 {
2994 	struct raid_map_disk_data *dd = &map_buff->data[0];
2995 	int map, row, col;
2996 	u16 map_cnt, row_cnt, disks_per_row;
2997 
2998 	if (rc != 0)
2999 		return;
3000 
3001 	/* Show details only if debugging has been activated. */
3002 	if (h->raid_offload_debug < 2)
3003 		return;
3004 
3005 	dev_info(&h->pdev->dev, "structure_size = %u\n",
3006 				le32_to_cpu(map_buff->structure_size));
3007 	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3008 			le32_to_cpu(map_buff->volume_blk_size));
3009 	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3010 			le64_to_cpu(map_buff->volume_blk_cnt));
3011 	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3012 			map_buff->phys_blk_shift);
3013 	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3014 			map_buff->parity_rotation_shift);
3015 	dev_info(&h->pdev->dev, "strip_size = %u\n",
3016 			le16_to_cpu(map_buff->strip_size));
3017 	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3018 			le64_to_cpu(map_buff->disk_starting_blk));
3019 	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3020 			le64_to_cpu(map_buff->disk_blk_cnt));
3021 	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3022 			le16_to_cpu(map_buff->data_disks_per_row));
3023 	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3024 			le16_to_cpu(map_buff->metadata_disks_per_row));
3025 	dev_info(&h->pdev->dev, "row_cnt = %u\n",
3026 			le16_to_cpu(map_buff->row_cnt));
3027 	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3028 			le16_to_cpu(map_buff->layout_map_count));
3029 	dev_info(&h->pdev->dev, "flags = 0x%x\n",
3030 			le16_to_cpu(map_buff->flags));
3031 	dev_info(&h->pdev->dev, "encrypytion = %s\n",
3032 			le16_to_cpu(map_buff->flags) &
3033 			RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
3034 	dev_info(&h->pdev->dev, "dekindex = %u\n",
3035 			le16_to_cpu(map_buff->dekindex));
3036 	map_cnt = le16_to_cpu(map_buff->layout_map_count);
3037 	for (map = 0; map < map_cnt; map++) {
3038 		dev_info(&h->pdev->dev, "Map%u:\n", map);
3039 		row_cnt = le16_to_cpu(map_buff->row_cnt);
3040 		for (row = 0; row < row_cnt; row++) {
3041 			dev_info(&h->pdev->dev, "  Row%u:\n", row);
3042 			disks_per_row =
3043 				le16_to_cpu(map_buff->data_disks_per_row);
3044 			for (col = 0; col < disks_per_row; col++, dd++)
3045 				dev_info(&h->pdev->dev,
3046 					"    D%02u: h=0x%04x xor=%u,%u\n",
3047 					col, dd->ioaccel_handle,
3048 					dd->xor_mult[0], dd->xor_mult[1]);
3049 			disks_per_row =
3050 				le16_to_cpu(map_buff->metadata_disks_per_row);
3051 			for (col = 0; col < disks_per_row; col++, dd++)
3052 				dev_info(&h->pdev->dev,
3053 					"    M%02u: h=0x%04x xor=%u,%u\n",
3054 					col, dd->ioaccel_handle,
3055 					dd->xor_mult[0], dd->xor_mult[1]);
3056 		}
3057 	}
3058 }
3059 #else
3060 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3061 			__attribute__((unused)) int rc,
3062 			__attribute__((unused)) struct raid_map_data *map_buff)
3063 {
3064 }
3065 #endif
3066 
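/* Fetch the RAID map for a logical volume and sanity-check its size. */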
3067 static int hpsa_get_raid_map(struct ctlr_info *h,
3068 	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3069 {
3070 	int rc = 0;
3071 	struct CommandList *c;
3072 	struct ErrorInfo *ei;
3073 
3074 	c = cmd_alloc(h);
3075 
3076 	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3077 			sizeof(this_device->raid_map), 0,
3078 			scsi3addr, TYPE_CMD)) {
3079 		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3080 		cmd_free(h, c);
3081 		return -1;
3082 	}
3083 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3084 					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3085 	if (rc)
3086 		goto out;
3087 	ei = c->err_info;
3088 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3089 		hpsa_scsi_interpret_error(h, c);
3090 		rc = -1;
3091 		goto out;
3092 	}
3093 	cmd_free(h, c);
3094 
3095 	/* @todo in the future, dynamically allocate RAID map memory */
3096 	if (le32_to_cpu(this_device->raid_map.structure_size) >
3097 				sizeof(this_device->raid_map)) {
3098 		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3099 		rc = -1;
3100 	}
3101 	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3102 	return rc;
3103 out:
3104 	cmd_free(h, c);
3105 	return rc;
3106 }
3107 
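/* Send BMIC sense-subsystem-information for the given BMIC device index. */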
3108 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3109 		unsigned char scsi3addr[], u16 bmic_device_index,
3110 		struct bmic_sense_subsystem_info *buf, size_t bufsize)
3111 {
3112 	int rc = IO_OK;
3113 	struct CommandList *c;
3114 	struct ErrorInfo *ei;
3115 
3116 	c = cmd_alloc(h);
3117 
3118 	rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3119 		0, RAID_CTLR_LUNID, TYPE_CMD);
3120 	if (rc)
3121 		goto out;
3122 
3123 	c->Request.CDB[2] = bmic_device_index & 0xff;
3124 	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3125 
3126 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3127 				PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3128 	if (rc)
3129 		goto out;
3130 	ei = c->err_info;
3131 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3132 		hpsa_scsi_interpret_error(h, c);
3133 		rc = -1;
3134 	}
3135 out:
3136 	cmd_free(h, c);
3137 	return rc;
3138 }
3139 
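/* Send BMIC identify-controller and return its data in buf. */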
3140 static int hpsa_bmic_id_controller(struct ctlr_info *h,
3141 	struct bmic_identify_controller *buf, size_t bufsize)
3142 {
3143 	int rc = IO_OK;
3144 	struct CommandList *c;
3145 	struct ErrorInfo *ei;
3146 
3147 	c = cmd_alloc(h);
3148 
3149 	rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3150 		0, RAID_CTLR_LUNID, TYPE_CMD);
3151 	if (rc)
3152 		goto out;
3153 
3154 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3155 		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3156 	if (rc)
3157 		goto out;
3158 	ei = c->err_info;
3159 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3160 		hpsa_scsi_interpret_error(h, c);
3161 		rc = -1;
3162 	}
3163 out:
3164 	cmd_free(h, c);
3165 	return rc;
3166 }
3167 
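/* Send BMIC identify-physical-device for the given BMIC device index. */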
3168 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3169 		unsigned char scsi3addr[], u16 bmic_device_index,
3170 		struct bmic_identify_physical_device *buf, size_t bufsize)
3171 {
3172 	int rc = IO_OK;
3173 	struct CommandList *c;
3174 	struct ErrorInfo *ei;
3175 
3176 	c = cmd_alloc(h);
3177 	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3178 		0, RAID_CTLR_LUNID, TYPE_CMD);
3179 	if (rc)
3180 		goto out;
3181 
3182 	c->Request.CDB[2] = bmic_device_index & 0xff;
3183 	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3184 
3185 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3186 						NO_TIMEOUT);
3187 	ei = c->err_info;
3188 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3189 		hpsa_scsi_interpret_error(h, c);
3190 		rc = -1;
3191 	}
3192 out:
3193 	cmd_free(h, c);
3194 
3195 	return rc;
3196 }
3197 
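/*
 * Find a physical device's SAS address (WWID) by matching its scsi3addr
 * against a freshly fetched extended report of physical LUNs.
 */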
3198 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3199 						unsigned char *scsi3addr)
3200 {
3201 	struct ReportExtendedLUNdata *physdev;
3202 	u32 nphysicals;
3203 	u64 sa = 0;
3204 	int i;
3205 
3206 	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3207 	if (!physdev)
3208 		return 0;
3209 
3210 	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3211 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3212 		kfree(physdev);
3213 		return 0;
3214 	}
3215 	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3216 
3217 	for (i = 0; i < nphysicals; i++)
3218 		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3219 			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3220 			break;
3221 		}
3222 
3223 	kfree(physdev);
3224 
3225 	return sa;
3226 }
3227 
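/*
 * Fill in dev->sas_address: use the subsystem's primary world wide ID
 * for the HBA LUN itself, otherwise look the device up in the report
 * of physical LUNs.
 */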
3228 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3229 					struct hpsa_scsi_dev_t *dev)
3230 {
3231 	int rc;
3232 	u64 sa = 0;
3233 
3234 	if (is_hba_lunid(scsi3addr)) {
3235 		struct bmic_sense_subsystem_info *ssi;
3236 
3237 		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3238 		if (ssi == NULL) {
3239 			dev_warn(&h->pdev->dev,
3240 				"%s: out of memory\n", __func__);
3241 			return;
3242 		}
3243 
3244 		rc = hpsa_bmic_sense_subsystem_information(h,
3245 					scsi3addr, 0, ssi, sizeof(*ssi));
3246 		if (rc == 0) {
3247 			sa = get_unaligned_be64(ssi->primary_world_wide_id);
3248 			h->sas_address = sa;
3249 		}
3250 
3251 		kfree(ssi);
3252 	} else
3253 		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3254 
3255 	dev->sas_address = sa;
3256 }
3257 
3258 /* Determine whether a device supports a given SCSI VPD page */
3259 static int hpsa_vpd_page_supported(struct ctlr_info *h,
3260 	unsigned char scsi3addr[], u8 page)
3261 {
3262 	int rc;
3263 	int i;
3264 	int pages;
3265 	unsigned char *buf, bufsize;
3266 
3267 	buf = kzalloc(256, GFP_KERNEL);
3268 	if (!buf)
3269 		return 0;
3270 
3271 	/* Get the size of the page list first */
3272 	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3273 				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3274 				buf, HPSA_VPD_HEADER_SZ);
3275 	if (rc != 0)
3276 		goto exit_unsupported;
3277 	pages = buf[3];
3278 	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3279 		bufsize = pages + HPSA_VPD_HEADER_SZ;
3280 	else
3281 		bufsize = 255;
3282 
3283 	/* Get the whole VPD page list */
3284 	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3285 				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3286 				buf, bufsize);
3287 	if (rc != 0)
3288 		goto exit_unsupported;
3289 
3290 	pages = buf[3];
3291 	for (i = 1; i <= pages; i++)
3292 		if (buf[3 + i] == page)
3293 			goto exit_supported;
3294 exit_unsupported:
3295 	kfree(buf);
3296 	return 0;
3297 exit_supported:
3298 	kfree(buf);
3299 	return 1;
3300 }
3301 
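/*
 * Read the logical volume's ioaccel status VPD page to find out whether
 * I/O accelerator offload is configured and enabled, and fetch the RAID
 * map when it is.
 */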
3302 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3303 	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3304 {
3305 	int rc;
3306 	unsigned char *buf;
3307 	u8 ioaccel_status;
3308 
3309 	this_device->offload_config = 0;
3310 	this_device->offload_enabled = 0;
3311 	this_device->offload_to_be_enabled = 0;
3312 
3313 	buf = kzalloc(64, GFP_KERNEL);
3314 	if (!buf)
3315 		return;
3316 	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3317 		goto out;
3318 	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3319 			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3320 	if (rc != 0)
3321 		goto out;
3322 
3323 #define IOACCEL_STATUS_BYTE 4
3324 #define OFFLOAD_CONFIGURED_BIT 0x01
3325 #define OFFLOAD_ENABLED_BIT 0x02
3326 	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3327 	this_device->offload_config =
3328 		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3329 	if (this_device->offload_config) {
3330 		this_device->offload_enabled =
3331 			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3332 		if (hpsa_get_raid_map(h, scsi3addr, this_device))
3333 			this_device->offload_enabled = 0;
3334 	}
3335 	this_device->offload_to_be_enabled = this_device->offload_enabled;
3336 out:
3337 	kfree(buf);
3338 	return;
3339 }
3340 
3341 /* Get the device id from inquiry page 0x83 */
3342 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3343 	unsigned char *device_id, int index, int buflen)
3344 {
3345 	int rc;
3346 	unsigned char *buf;
3347 
3348 	if (buflen > 16)
3349 		buflen = 16;
3350 	buf = kzalloc(64, GFP_KERNEL);
3351 	if (!buf)
3352 		return -ENOMEM;
3353 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
3354 	if (rc == 0)
3355 		memcpy(device_id, &buf[index], buflen);
3356 
3357 	kfree(buf);
3358 
3359 	return rc != 0;
3360 }
3361 
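/*
 * Issue a CISS report LUNs command (logical or physical) and verify
 * that the response uses the requested extended-response format.
 */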
3362 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3363 		void *buf, int bufsize,
3364 		int extended_response)
3365 {
3366 	int rc = IO_OK;
3367 	struct CommandList *c;
3368 	unsigned char scsi3addr[8];
3369 	struct ErrorInfo *ei;
3370 
3371 	c = cmd_alloc(h);
3372 
3373 	/* address the controller */
3374 	memset(scsi3addr, 0, sizeof(scsi3addr));
3375 	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3376 		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3377 		rc = -1;
3378 		goto out;
3379 	}
3380 	if (extended_response)
3381 		c->Request.CDB[1] = extended_response;
3382 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3383 					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3384 	if (rc)
3385 		goto out;
3386 	ei = c->err_info;
3387 	if (ei->CommandStatus != 0 &&
3388 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
3389 		hpsa_scsi_interpret_error(h, c);
3390 		rc = -1;
3391 	} else {
3392 		struct ReportLUNdata *rld = buf;
3393 
3394 		if (rld->extended_response_flag != extended_response) {
3395 			dev_err(&h->pdev->dev,
3396 				"report luns requested format %u, got %u\n",
3397 				extended_response,
3398 				rld->extended_response_flag);
3399 			rc = -1;
3400 		}
3401 	}
3402 out:
3403 	cmd_free(h, c);
3404 	return rc;
3405 }
3406 
3407 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3408 		struct ReportExtendedLUNdata *buf, int bufsize)
3409 {
3410 	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3411 						HPSA_REPORT_PHYS_EXTENDED);
3412 }
3413 
3414 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3415 		struct ReportLUNdata *buf, int bufsize)
3416 {
3417 	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3418 }
3419 
3420 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3421 	int bus, int target, int lun)
3422 {
3423 	device->bus = bus;
3424 	device->target = target;
3425 	device->lun = lun;
3426 }
3427 
3428 /* Use VPD inquiry to get details of volume status */
3429 static int hpsa_get_volume_status(struct ctlr_info *h,
3430 					unsigned char scsi3addr[])
3431 {
3432 	int rc;
3433 	int status;
3434 	int size;
3435 	unsigned char *buf;
3436 
3437 	buf = kzalloc(64, GFP_KERNEL);
3438 	if (!buf)
3439 		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3440 
3441 	/* Does controller have VPD for logical volume status? */
3442 	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3443 		goto exit_failed;
3444 
3445 	/* Get the size of the VPD return buffer */
3446 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3447 					buf, HPSA_VPD_HEADER_SZ);
3448 	if (rc != 0)
3449 		goto exit_failed;
3450 	size = buf[3];
3451 
3452 	/* Now get the whole VPD buffer */
3453 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3454 					buf, size + HPSA_VPD_HEADER_SZ);
3455 	if (rc != 0)
3456 		goto exit_failed;
3457 	status = buf[4]; /* status byte */
3458 
3459 	kfree(buf);
3460 	return status;
3461 exit_failed:
3462 	kfree(buf);
3463 	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3464 }
3465 
3466 /* Determine offline status of a volume.
3467  * Return either:
3468  *  0 (not offline)
3469  *  0xff (offline for unknown reasons)
3470  *  # (integer code indicating one of several NOT READY states
3471  *     describing why a volume is to be kept offline)
3472  */
3473 static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3474 					unsigned char scsi3addr[])
3475 {
3476 	struct CommandList *c;
3477 	unsigned char *sense;
3478 	u8 sense_key, asc, ascq;
3479 	int sense_len;
3480 	int rc, ldstat = 0;
3481 	u16 cmd_status;
3482 	u8 scsi_status;
3483 #define ASC_LUN_NOT_READY 0x04
3484 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3485 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3486 
3487 	c = cmd_alloc(h);
3488 
3489 	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3490 	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3491 	if (rc) {
3492 		cmd_free(h, c);
3493 		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3494 	}
3495 	sense = c->err_info->SenseInfo;
3496 	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3497 		sense_len = sizeof(c->err_info->SenseInfo);
3498 	else
3499 		sense_len = c->err_info->SenseLen;
3500 	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3501 	cmd_status = c->err_info->CommandStatus;
3502 	scsi_status = c->err_info->ScsiStatus;
3503 	cmd_free(h, c);
3504 
3505 	/* Determine the reason for not ready state */
3506 	ldstat = hpsa_get_volume_status(h, scsi3addr);
3507 
3508 	/* Keep volume offline in certain cases: */
3509 	switch (ldstat) {
3510 	case HPSA_LV_FAILED:
3511 	case HPSA_LV_UNDERGOING_ERASE:
3512 	case HPSA_LV_NOT_AVAILABLE:
3513 	case HPSA_LV_UNDERGOING_RPI:
3514 	case HPSA_LV_PENDING_RPI:
3515 	case HPSA_LV_ENCRYPTED_NO_KEY:
3516 	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3517 	case HPSA_LV_UNDERGOING_ENCRYPTION:
3518 	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3519 	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3520 		return ldstat;
3521 	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3522 		/* If VPD status page isn't available,
3523 		 * use ASC/ASCQ to determine state
3524 		 */
3525 		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3526 			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3527 			return ldstat;
3528 		break;
3529 	default:
3530 		break;
3531 	}
3532 	return HPSA_LV_OK;
3533 }
3534 
3535 /*
3536  * Find out if a logical device supports aborts by simply trying one.
3537  * Smart Array may claim not to support aborts on logical drives, but
3538  * if an MSA2000 is connected, the drives on it will be presented
3539  * by the Smart Array as logical drives, and aborts may be sent to
3540  * those devices successfully.  So the simplest way to find out is
3541  * to simply try an abort and see how the device responds.
3542  */
3543 static int hpsa_device_supports_aborts(struct ctlr_info *h,
3544 					unsigned char *scsi3addr)
3545 {
3546 	struct CommandList *c;
3547 	struct ErrorInfo *ei;
3548 	int rc = 0;
3549 
3550 	u64 tag = (u64) -1; /* bogus tag */
3551 
3552 	/* Assume that physical devices support aborts */
3553 	if (!is_logical_dev_addr_mode(scsi3addr))
3554 		return 1;
3555 
3556 	c = cmd_alloc(h);
3557 
3558 	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3559 	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3560 	/* no unmap needed here because no data xfer. */
3561 	ei = c->err_info;
3562 	switch (ei->CommandStatus) {
3563 	case CMD_INVALID:
3564 		rc = 0;
3565 		break;
3566 	case CMD_UNABORTABLE:
3567 	case CMD_ABORT_FAILED:
3568 		rc = 1;
3569 		break;
3570 	case CMD_TMF_STATUS:
3571 		rc = hpsa_evaluate_tmf_status(h, c);
3572 		break;
3573 	default:
3574 		rc = 0;
3575 		break;
3576 	}
3577 	cmd_free(h, c);
3578 	return rc;
3579 }
3580 
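/* Replace unprintable bytes in an inquiry string with spaces. */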
3581 static void sanitize_inquiry_string(unsigned char *s, int len)
3582 {
3583 	bool terminated = false;
3584 
3585 	for (; len > 0; (--len, ++s)) {
3586 		if (*s == 0)
3587 			terminated = true;
3588 		if (terminated || *s < 0x20 || *s > 0x7e)
3589 			*s = ' ';
3590 	}
3591 }
3592 
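/*
 * Inquire a device and fill in its type, vendor, model, device id,
 * RAID level, offload status and volume state.
 */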
3593 static int hpsa_update_device_info(struct ctlr_info *h,
3594 	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3595 	unsigned char *is_OBDR_device)
3596 {
3597 
3598 #define OBDR_SIG_OFFSET 43
3599 #define OBDR_TAPE_SIG "$DR-10"
3600 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3601 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3602 
3603 	unsigned char *inq_buff;
3604 	unsigned char *obdr_sig;
3605 	int rc = 0;
3606 
3607 	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3608 	if (!inq_buff) {
3609 		rc = -ENOMEM;
3610 		goto bail_out;
3611 	}
3612 
3613 	/* Do an inquiry to the device to see what it is. */
3614 	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3615 		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3616 		dev_err(&h->pdev->dev,
3617 			"%s: inquiry failed, device will be skipped.\n",
3618 			__func__);
3619 		rc = HPSA_INQUIRY_FAILED;
3620 		goto bail_out;
3621 	}
3622 
3623 	sanitize_inquiry_string(&inq_buff[8], 8);
3624 	sanitize_inquiry_string(&inq_buff[16], 16);
3625 
3626 	this_device->devtype = (inq_buff[0] & 0x1f);
3627 	memcpy(this_device->scsi3addr, scsi3addr, 8);
3628 	memcpy(this_device->vendor, &inq_buff[8],
3629 		sizeof(this_device->vendor));
3630 	memcpy(this_device->model, &inq_buff[16],
3631 		sizeof(this_device->model));
3632 	memset(this_device->device_id, 0,
3633 		sizeof(this_device->device_id));
3634 	hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3635 		sizeof(this_device->device_id));
3636 
3637 	if (this_device->devtype == TYPE_DISK &&
3638 		is_logical_dev_addr_mode(scsi3addr)) {
3639 		unsigned char volume_offline;
3640 
3641 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3642 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3643 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3644 		volume_offline = hpsa_volume_offline(h, scsi3addr);
3645 		this_device->volume_offline = volume_offline;
3646 		if (volume_offline == HPSA_LV_FAILED) {
3647 			rc = HPSA_LV_FAILED;
3648 			dev_err(&h->pdev->dev,
3649 				"%s: LV failed, device will be skipped.\n",
3650 				__func__);
3651 			goto bail_out;
3652 		}
3653 	} else {
3654 		this_device->raid_level = RAID_UNKNOWN;
3655 		this_device->offload_config = 0;
3656 		this_device->offload_enabled = 0;
3657 		this_device->offload_to_be_enabled = 0;
3658 		this_device->hba_ioaccel_enabled = 0;
3659 		this_device->volume_offline = 0;
3660 		this_device->queue_depth = h->nr_cmds;
3661 	}
3662 
3663 	if (is_OBDR_device) {
3664 		/* See if this is a One-Button-Disaster-Recovery device
3665 		 * by looking for "$DR-10" at offset 43 in inquiry data.
3666 		 */
3667 		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3668 		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3669 					strncmp(obdr_sig, OBDR_TAPE_SIG,
3670 						OBDR_SIG_LEN) == 0);
3671 	}
3672 	kfree(inq_buff);
3673 	return 0;
3674 
3675 bail_out:
3676 	kfree(inq_buff);
3677 	return rc;
3678 }
3679 
3680 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3681 			struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3682 {
3683 	unsigned long flags;
3684 	int rc, entry;
3685 	/*
3686 	 * See if this device supports aborts.  If we already know
3687 	 * the device, we already know if it supports aborts, otherwise
3688 	 * we have to find out if it supports aborts by trying one.
3689 	 */
3690 	spin_lock_irqsave(&h->devlock, flags);
3691 	rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3692 	if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3693 		entry >= 0 && entry < h->ndevices) {
3694 		dev->supports_aborts = h->dev[entry]->supports_aborts;
3695 		spin_unlock_irqrestore(&h->devlock, flags);
3696 	} else {
3697 		spin_unlock_irqrestore(&h->devlock, flags);
3698 		dev->supports_aborts =
3699 				hpsa_device_supports_aborts(h, scsi3addr);
3700 		if (dev->supports_aborts < 0)
3701 			dev->supports_aborts = 0;
3702 	}
3703 }
3704 
3705 /*
3706  * Helper function to assign bus, target, lun mapping of devices.
3707  * Logical drive target and lun are assigned at this time, but
3708  * physical device lun and target assignment are deferred (assigned
3709  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3710  */
3711 static void figure_bus_target_lun(struct ctlr_info *h,
3712 	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3713 {
3714 	u32 lunid = get_unaligned_le32(lunaddrbytes);
3715 
3716 	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3717 		/* physical device, target and lun filled in later */
3718 		if (is_hba_lunid(lunaddrbytes))
3719 			hpsa_set_bus_target_lun(device,
3720 					HPSA_HBA_BUS, 0, lunid & 0x3fff);
3721 		else
3722 			/* defer target, lun assignment for physical devices */
3723 			hpsa_set_bus_target_lun(device,
3724 					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
3725 		return;
3726 	}
3727 	/* It's a logical device */
3728 	if (device->external) {
3729 		hpsa_set_bus_target_lun(device,
3730 			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
3731 			lunid & 0x00ff);
3732 		return;
3733 	}
3734 	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
3735 				0, lunid & 0x3fff);
3736 }
3737 
3738 
3739 /*
3740  * Get address of physical disk used for an ioaccel2 mode command:
3741  *	1. Extract ioaccel2 handle from the command.
3742  *	2. Find a matching ioaccel2 handle from list of physical disks.
3743  *	3. Return:
3744  *		1 and set scsi3addr to address of matching physical
3745  *		0 if no matching physical disk was found.
3746  */
3747 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3748 	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3749 {
3750 	struct io_accel2_cmd *c2 =
3751 			&h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3752 	unsigned long flags;
3753 	int i;
3754 
3755 	spin_lock_irqsave(&h->devlock, flags);
3756 	for (i = 0; i < h->ndevices; i++)
3757 		if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3758 			memcpy(scsi3addr, h->dev[i]->scsi3addr,
3759 				sizeof(h->dev[i]->scsi3addr));
3760 			spin_unlock_irqrestore(&h->devlock, flags);
3761 			return 1;
3762 		}
3763 	spin_unlock_irqrestore(&h->devlock, flags);
3764 	return 0;
3765 }
3766 
3767 static int  figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
3768 	int i, int nphysicals, int nlocal_logicals)
3769 {
3770 	/* In report logicals, local logicals are listed first,
3771 	 * then any externals.
3772 	 */
3773 	int logicals_start = nphysicals + (raid_ctlr_position == 0);
3774 
3775 	if (i == raid_ctlr_position)
3776 		return 0;
3777 
3778 	if (i < logicals_start)
3779 		return 0;
3780 
3781 	/* i is in logicals range, but still within local logicals */
3782 	if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
3783 		return 0;
3784 
3785 	return 1; /* it's an external lun */
3786 }
3787 
3788 /*
3789  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
3790  * logdev.  The number of luns in physdev and logdev are returned in
3791  * *nphysicals and *nlogicals, respectively.
3792  * Returns 0 on success, -1 otherwise.
3793  */
3794 static int hpsa_gather_lun_info(struct ctlr_info *h,
3795 	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
3796 	struct ReportLUNdata *logdev, u32 *nlogicals)
3797 {
3798 	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3799 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3800 		return -1;
3801 	}
3802 	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
3803 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
3804 		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3805 			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
3806 		*nphysicals = HPSA_MAX_PHYS_LUN;
3807 	}
3808 	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
3809 		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3810 		return -1;
3811 	}
3812 	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
3813 	/* Reject Logicals in excess of our max capability. */
3814 	if (*nlogicals > HPSA_MAX_LUN) {
3815 		dev_warn(&h->pdev->dev,
3816 			"maximum logical LUNs (%d) exceeded.  "
3817 			"%d LUNs ignored.\n", HPSA_MAX_LUN,
3818 			*nlogicals - HPSA_MAX_LUN);
3819 			*nlogicals = HPSA_MAX_LUN;
3820 	}
3821 	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3822 		dev_warn(&h->pdev->dev,
3823 			"maximum logical + physical LUNs (%d) exceeded. "
3824 			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3825 			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3826 		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
3827 	}
3828 	return 0;
3829 }
3830 
3831 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3832 	int i, int nphysicals, int nlogicals,
3833 	struct ReportExtendedLUNdata *physdev_list,
3834 	struct ReportLUNdata *logdev_list)
3835 {
3836 	/* Helper function, figure out where the LUN ID info is coming from
3837 	 * given index i, lists of physical and logical devices, where in
3838 	 * the list the raid controller is supposed to appear (first or last)
3839 	 */
3840 
3841 	int logicals_start = nphysicals + (raid_ctlr_position == 0);
3842 	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3843 
3844 	if (i == raid_ctlr_position)
3845 		return RAID_CTLR_LUNID;
3846 
3847 	if (i < logicals_start)
3848 		return &physdev_list->LUN[i -
3849 				(raid_ctlr_position == 0)].lunid[0];
3850 
3851 	if (i < last_device)
3852 		return &logdev_list->LUN[i - nphysicals -
3853 			(raid_ctlr_position == 0)][0];
3854 	BUG();
3855 	return NULL;
3856 }
3857 
3858 /* get physical drive ioaccel handle and queue depth */
3859 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3860 		struct hpsa_scsi_dev_t *dev,
3861 		struct ReportExtendedLUNdata *rlep, int rle_index,
3862 		struct bmic_identify_physical_device *id_phys)
3863 {
3864 	int rc;
3865 	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3866 
3867 	dev->ioaccel_handle = rle->ioaccel_handle;
3868 	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
3869 		dev->hba_ioaccel_enabled = 1;
3870 	memset(id_phys, 0, sizeof(*id_phys));
3871 	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
3872 			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
3873 			sizeof(*id_phys));
3874 	if (!rc)
3875 		/* Reserve space for FW operations */
3876 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3877 #define DRIVE_QUEUE_DEPTH 7
3878 		dev->queue_depth =
3879 			le16_to_cpu(id_phys->current_queue_depth_limit) -
3880 				DRIVE_CMDS_RESERVED_FOR_FW;
3881 	else
3882 		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3883 }
3884 
3885 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
3886 	struct ReportExtendedLUNdata *rlep, int rle_index,
3887 	struct bmic_identify_physical_device *id_phys)
3888 {
3889 	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3890 
3891 	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
3892 		this_device->hba_ioaccel_enabled = 1;
3893 
3894 	memcpy(&this_device->active_path_index,
3895 		&id_phys->active_path_number,
3896 		sizeof(this_device->active_path_index));
3897 	memcpy(&this_device->path_map,
3898 		&id_phys->redundant_path_present_map,
3899 		sizeof(this_device->path_map));
3900 	memcpy(&this_device->box,
3901 		&id_phys->alternate_paths_phys_box_on_port,
3902 		sizeof(this_device->box));
3903 	memcpy(&this_device->phys_connector,
3904 		&id_phys->alternate_paths_phys_connector,
3905 		sizeof(this_device->phys_connector));
3906 	memcpy(&this_device->bay,
3907 		&id_phys->phys_bay_in_box,
3908 		sizeof(this_device->bay));
3909 }
3910 
3911 /* get number of local logical disks. */
3912 static int hpsa_set_local_logical_count(struct ctlr_info *h,
3913 	struct bmic_identify_controller *id_ctlr,
3914 	u32 *nlocals)
3915 {
3916 	int rc;
3917 
3918 	if (!id_ctlr) {
3919 		dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
3920 			__func__);
3921 		return -ENOMEM;
3922 	}
3923 	memset(id_ctlr, 0, sizeof(*id_ctlr));
3924 	rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
3925 	if (!rc)
3926 		if (id_ctlr->configured_logical_drive_count < 256)
3927 			*nlocals = id_ctlr->configured_logical_drive_count;
3928 		else
3929 			*nlocals = le16_to_cpu(
3930 					id_ctlr->extended_logical_unit_count);
3931 	else
3932 		*nlocals = -1;
3933 	return rc;
3934 }
3935 
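/* Ask the controller whether a physical drive is a RAID set spare. */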
3936 static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
3937 {
3938 	struct bmic_identify_physical_device *id_phys;
3939 	bool is_spare = false;
3940 	int rc;
3941 
3942 	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3943 	if (!id_phys)
3944 		return false;
3945 
3946 	rc = hpsa_bmic_id_physical_device(h,
3947 					lunaddrbytes,
3948 					GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
3949 					id_phys, sizeof(*id_phys));
3950 	if (rc == 0)
3951 		is_spare = (id_phys->more_flags >> 6) & 0x01;
3952 
3953 	kfree(id_phys);
3954 	return is_spare;
3955 }
3956 
3957 #define RPL_DEV_FLAG_NON_DISK                           0x1
3958 #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED  0x2
3959 #define RPL_DEV_FLAG_UNCONFIG_DISK                      0x4
3960 
3961 #define BMIC_DEVICE_TYPE_ENCLOSURE  6
3962 
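/*
 * Decide whether a masked physical device should be skipped during a
 * rescan: non-disk devices other than enclosures, and spares that may
 * be spun down, are not exposed.
 */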
3963 static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
3964 				struct ext_report_lun_entry *rle)
3965 {
3966 	u8 device_flags;
3967 	u8 device_type;
3968 
3969 	if (!MASKED_DEVICE(lunaddrbytes))
3970 		return false;
3971 
3972 	device_flags = rle->device_flags;
3973 	device_type = rle->device_type;
3974 
3975 	if (device_flags & RPL_DEV_FLAG_NON_DISK) {
3976 		if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
3977 			return false;
3978 		return true;
3979 	}
3980 
3981 	if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
3982 		return false;
3983 
3984 	if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
3985 		return false;
3986 
3987 	/*
3988 	 * Spares may be spun down, we do not want to
3989 	 * do an Inquiry to a RAID set spare drive as
3990 	 * that would have them spun up, that is a
3991 	 * performance hit because I/O to the RAID device
3992 	 * stops while the spin up occurs which can take
3993 	 * over 50 seconds.
3994 	 */
3995 	if (hpsa_is_disk_spare(h, lunaddrbytes))
3996 		return true;
3997 
3998 	return false;
3999 }
4000 
4001 static void hpsa_update_scsi_devices(struct ctlr_info *h)
4002 {
4003 	/* the idea here is we could get notified
4004 	 * that some devices have changed, so we do a report
4005 	 * physical luns and report logical luns cmd, and adjust
4006 	 * our list of devices accordingly.
4007 	 *
4008 	 * The scsi3addr's of devices won't change so long as the
4009 	 * adapter is not reset.  That means we can rescan and
4010 	 * tell which devices we already know about, vs. new
4011 	 * devices, vs.  disappearing devices.
4012 	 */
4013 	struct ReportExtendedLUNdata *physdev_list = NULL;
4014 	struct ReportLUNdata *logdev_list = NULL;
4015 	struct bmic_identify_physical_device *id_phys = NULL;
4016 	struct bmic_identify_controller *id_ctlr = NULL;
4017 	u32 nphysicals = 0;
4018 	u32 nlogicals = 0;
4019 	u32 nlocal_logicals = 0;
4020 	u32 ndev_allocated = 0;
4021 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4022 	int ncurrent = 0;
4023 	int i, n_ext_target_devs, ndevs_to_allocate;
4024 	int raid_ctlr_position;
4025 	bool physical_device;
4026 	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4027 
4028 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
4029 	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4030 	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4031 	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4032 	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4033 	id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4034 
4035 	if (!currentsd || !physdev_list || !logdev_list ||
4036 		!tmpdevice || !id_phys || !id_ctlr) {
4037 		dev_err(&h->pdev->dev, "out of memory\n");
4038 		goto out;
4039 	}
4040 	memset(lunzerobits, 0, sizeof(lunzerobits));
4041 
4042 	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
4043 
4044 	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4045 			logdev_list, &nlogicals)) {
4046 		h->drv_req_rescan = 1;
4047 		goto out;
4048 	}
4049 
4050 	/* Set number of local logicals (non PTRAID) */
4051 	if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4052 		dev_warn(&h->pdev->dev,
4053 			"%s: Can't determine number of local logical devices.\n",
4054 			__func__);
4055 	}
4056 
4057 	/* We might see up to the maximum number of logical and physical disks
4058 	 * plus external target devices, and a device for the local RAID
4059 	 * controller.
4060 	 */
4061 	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4062 
4063 	/* Allocate the per device structures */
4064 	for (i = 0; i < ndevs_to_allocate; i++) {
4065 		if (i >= HPSA_MAX_DEVICES) {
4066 			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4067 				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
4068 				ndevs_to_allocate - HPSA_MAX_DEVICES);
4069 			break;
4070 		}
4071 
4072 		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4073 		if (!currentsd[i]) {
4074 			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
4075 				__FILE__, __LINE__);
4076 			h->drv_req_rescan = 1;
4077 			goto out;
4078 		}
4079 		ndev_allocated++;
4080 	}
4081 
4082 	if (is_scsi_rev_5(h))
4083 		raid_ctlr_position = 0;
4084 	else
4085 		raid_ctlr_position = nphysicals + nlogicals;
4086 
4087 	/* adjust our table of devices */
4088 	n_ext_target_devs = 0;
4089 	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4090 		u8 *lunaddrbytes, is_OBDR = 0;
4091 		int rc = 0;
4092 		int phys_dev_index = i - (raid_ctlr_position == 0);
4093 		bool skip_device = false;
4094 
4095 		physical_device = i < nphysicals + (raid_ctlr_position == 0);
4096 
4097 		/* Figure out where the LUN ID info is coming from */
4098 		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4099 			i, nphysicals, nlogicals, physdev_list, logdev_list);
4100 
4101 		/*
4102 		 * Skip over some devices such as a spare.
4103 		 */
4104 		if (!tmpdevice->external && physical_device) {
4105 			skip_device = hpsa_skip_device(h, lunaddrbytes,
4106 					&physdev_list->LUN[phys_dev_index]);
4107 			if (skip_device)
4108 				continue;
4109 		}
4110 
4111 		/* Get device type, vendor, model, device id */
4112 		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4113 							&is_OBDR);
4114 		if (rc == -ENOMEM) {
4115 			dev_warn(&h->pdev->dev,
4116 				"Out of memory, rescan deferred.\n");
4117 			h->drv_req_rescan = 1;
4118 			goto out;
4119 		}
4120 		if (rc) {
4121 			h->drv_req_rescan = 1;
4122 			continue;
4123 		}
4124 
4125 		/* Determine if this is a lun from an external target array */
4126 		tmpdevice->external =
4127 			figure_external_status(h, raid_ctlr_position, i,
4128 						nphysicals, nlocal_logicals);
4129 
4130 		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4131 		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
4132 		this_device = currentsd[ncurrent];
4133 
4134 		/* Turn on discovery_polling if there are ext target devices.
4135 		 * Event-based change notification is unreliable for those.
4136 		 */
4137 		if (!h->discovery_polling) {
4138 			if (tmpdevice->external) {
4139 				h->discovery_polling = 1;
4140 				dev_info(&h->pdev->dev,
4141 					"External target, activate discovery polling.\n");
4142 			}
4143 		}
4144 
4145 
4146 		*this_device = *tmpdevice;
4147 		this_device->physical_device = physical_device;
4148 
4149 		/*
4150 		 * Expose all devices except for physical devices that
4151 		 * are masked.
4152 		 */
4153 		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4154 			this_device->expose_device = 0;
4155 		else
4156 			this_device->expose_device = 1;
4157 
4158 
4159 		/*
4160 		 * Get the SAS address for physical devices that are exposed.
4161 		 */
4162 		if (this_device->physical_device && this_device->expose_device)
4163 			hpsa_get_sas_address(h, lunaddrbytes, this_device);
4164 
4165 		switch (this_device->devtype) {
4166 		case TYPE_ROM:
4167 			/* We don't *really* support actual CD-ROM devices,
4168 			 * just "One Button Disaster Recovery" tape drive
4169 			 * which temporarily pretends to be a CD-ROM drive.
4170 			 * So we check that the device is really an OBDR tape
4171 			 * device by checking for "$DR-10" in bytes 43-48 of
4172 			 * the inquiry data.
4173 			 */
4174 			if (is_OBDR)
4175 				ncurrent++;
4176 			break;
4177 		case TYPE_DISK:
4178 			if (this_device->physical_device) {
4179 				/* The disk is in HBA mode. */
4180 				/* Never use RAID mapper in HBA mode. */
4181 				this_device->offload_enabled = 0;
4182 				hpsa_get_ioaccel_drive_info(h, this_device,
4183 					physdev_list, phys_dev_index, id_phys);
4184 				hpsa_get_path_info(this_device,
4185 					physdev_list, phys_dev_index, id_phys);
4186 			}
4187 			ncurrent++;
4188 			break;
4189 		case TYPE_TAPE:
4190 		case TYPE_MEDIUM_CHANGER:
4191 		case TYPE_ENCLOSURE:
4192 			ncurrent++;
4193 			break;
4194 		case TYPE_RAID:
4195 			/* Only present the Smartarray HBA as a RAID controller.
4196 			 * If it's a RAID controller other than the HBA itself
4197 			 * (an external RAID controller, MSA500 or similar)
4198 			 * don't present it.
4199 			 */
4200 			if (!is_hba_lunid(lunaddrbytes))
4201 				break;
4202 			ncurrent++;
4203 			break;
4204 		default:
4205 			break;
4206 		}
4207 		if (ncurrent >= HPSA_MAX_DEVICES)
4208 			break;
4209 	}
4210 
4211 	if (h->sas_host == NULL) {
4212 		int rc = 0;
4213 
4214 		rc = hpsa_add_sas_host(h);
4215 		if (rc) {
4216 			dev_warn(&h->pdev->dev,
4217 				"Could not add sas host %d\n", rc);
4218 			goto out;
4219 		}
4220 	}
4221 
4222 	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4223 out:
4224 	kfree(tmpdevice);
4225 	for (i = 0; i < ndev_allocated; i++)
4226 		kfree(currentsd[i]);
4227 	kfree(currentsd);
4228 	kfree(physdev_list);
4229 	kfree(logdev_list);
4230 	kfree(id_ctlr);
4231 	kfree(id_phys);
4232 }
4233 
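/* Copy one DMA-mapped scatterlist element into an hpsa SG descriptor. */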
4234 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4235 				   struct scatterlist *sg)
4236 {
4237 	u64 addr64 = (u64) sg_dma_address(sg);
4238 	unsigned int len = sg_dma_len(sg);
4239 
4240 	desc->Addr = cpu_to_le64(addr64);
4241 	desc->Len = cpu_to_le32(len);
4242 	desc->Ext = 0;
4243 }
4244 
4245 /*
4246  * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
4247  * dma mapping  and fills in the scatter gather entries of the
4248  * hpsa command, cp.
4249  */
4250 static int hpsa_scatter_gather(struct ctlr_info *h,
4251 		struct CommandList *cp,
4252 		struct scsi_cmnd *cmd)
4253 {
4254 	struct scatterlist *sg;
4255 	int use_sg, i, sg_limit, chained, last_sg;
4256 	struct SGDescriptor *curr_sg;
4257 
4258 	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4259 
4260 	use_sg = scsi_dma_map(cmd);
4261 	if (use_sg < 0)
4262 		return use_sg;
4263 
4264 	if (!use_sg)
4265 		goto sglist_finished;
4266 
4267 	/*
4268 	 * If the number of entries is greater than the max for a single list,
4269 	 * then we have a chained list; we will set up all but one entry in the
4270 	 * first list (the last entry is saved for link information);
4271 	 * otherwise, we don't have a chained list and we'll set up at each of
4272 	 * the entries in the one list.
4273 	 */
4274 	curr_sg = cp->SG;
4275 	chained = use_sg > h->max_cmd_sg_entries;
4276 	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4277 	last_sg = scsi_sg_count(cmd) - 1;
4278 	scsi_for_each_sg(cmd, sg, sg_limit, i) {
4279 		hpsa_set_sg_descriptor(curr_sg, sg);
4280 		curr_sg++;
4281 	}
4282 
4283 	if (chained) {
4284 		/*
4285 		 * Continue with the chained list.  Set curr_sg to the chained
4286 		 * list.  Modify the limit to the total count less the entries
4287 		 * we've already set up.  Resume the scan at the list entry
4288 		 * where the previous loop left off.
4289 		 */
4290 		curr_sg = h->cmd_sg_list[cp->cmdindex];
4291 		sg_limit = use_sg - sg_limit;
4292 		for_each_sg(sg, sg, sg_limit, i) {
4293 			hpsa_set_sg_descriptor(curr_sg, sg);
4294 			curr_sg++;
4295 		}
4296 	}
4297 
4298 	/* Back the pointer up to the last entry and mark it as "last". */
4299 	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4300 
4301 	if (use_sg + chained > h->maxSG)
4302 		h->maxSG = use_sg + chained;
4303 
4304 	if (chained) {
4305 		cp->Header.SGList = h->max_cmd_sg_entries;
4306 		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4307 		if (hpsa_map_sg_chain_block(h, cp)) {
4308 			scsi_dma_unmap(cmd);
4309 			return -1;
4310 		}
4311 		return 0;
4312 	}
4313 
4314 sglist_finished:
4315 
4316 	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
4317 	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
4318 	return 0;
4319 }
4320 
4321 #define IO_ACCEL_INELIGIBLE (1)
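/*
 * Rewrite 6- and 12-byte READ/WRITE CDBs as 10-byte equivalents so the
 * ioaccel paths only need to handle one CDB size; transfers too large
 * for a 10-byte CDB are reported as ineligible for ioaccel.
 */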
4322 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4323 {
4324 	int is_write = 0;
4325 	u32 block;
4326 	u32 block_cnt;
4327 
4328 	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
4329 	switch (cdb[0]) {
4330 	case WRITE_6:
4331 	case WRITE_12:
4332 		is_write = 1;
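		/* fall through */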
4333 	case READ_6:
4334 	case READ_12:
4335 		if (*cdb_len == 6) {
4336 			block = get_unaligned_be16(&cdb[2]);
4337 			block_cnt = cdb[4];
4338 			if (block_cnt == 0)
4339 				block_cnt = 256;
4340 		} else {
4341 			BUG_ON(*cdb_len != 12);
4342 			block = get_unaligned_be32(&cdb[2]);
4343 			block_cnt = get_unaligned_be32(&cdb[6]);
4344 		}
4345 		if (block_cnt > 0xffff)
4346 			return IO_ACCEL_INELIGIBLE;
4347 
4348 		cdb[0] = is_write ? WRITE_10 : READ_10;
4349 		cdb[1] = 0;
4350 		cdb[2] = (u8) (block >> 24);
4351 		cdb[3] = (u8) (block >> 16);
4352 		cdb[4] = (u8) (block >> 8);
4353 		cdb[5] = (u8) (block);
4354 		cdb[6] = 0;
4355 		cdb[7] = (u8) (block_cnt >> 8);
4356 		cdb[8] = (u8) (block_cnt);
4357 		cdb[9] = 0;
4358 		*cdb_len = 10;
4359 		break;
4360 	}
4361 	return 0;
4362 }
4363 
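/*
 * Build and submit a mode-1 I/O accelerator request directly to a
 * device behind the controller.
 */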
4364 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4365 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4366 	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4367 {
4368 	struct scsi_cmnd *cmd = c->scsi_cmd;
4369 	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4370 	unsigned int len;
4371 	unsigned int total_len = 0;
4372 	struct scatterlist *sg;
4373 	u64 addr64;
4374 	int use_sg, i;
4375 	struct SGDescriptor *curr_sg;
4376 	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4377 
4378 	/* TODO: implement chaining support */
4379 	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4380 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4381 		return IO_ACCEL_INELIGIBLE;
4382 	}
4383 
4384 	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4385 
4386 	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4387 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4388 		return IO_ACCEL_INELIGIBLE;
4389 	}
4390 
4391 	c->cmd_type = CMD_IOACCEL1;
4392 
4393 	/* Adjust the DMA address to point to the accelerated command buffer */
4394 	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4395 				(c->cmdindex * sizeof(*cp));
4396 	BUG_ON(c->busaddr & 0x0000007F);
4397 
4398 	use_sg = scsi_dma_map(cmd);
4399 	if (use_sg < 0) {
4400 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4401 		return use_sg;
4402 	}
4403 
4404 	if (use_sg) {
4405 		curr_sg = cp->SG;
4406 		scsi_for_each_sg(cmd, sg, use_sg, i) {
4407 			addr64 = (u64) sg_dma_address(sg);
4408 			len  = sg_dma_len(sg);
4409 			total_len += len;
4410 			curr_sg->Addr = cpu_to_le64(addr64);
4411 			curr_sg->Len = cpu_to_le32(len);
4412 			curr_sg->Ext = cpu_to_le32(0);
4413 			curr_sg++;
4414 		}
4415 		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4416 
4417 		switch (cmd->sc_data_direction) {
4418 		case DMA_TO_DEVICE:
4419 			control |= IOACCEL1_CONTROL_DATA_OUT;
4420 			break;
4421 		case DMA_FROM_DEVICE:
4422 			control |= IOACCEL1_CONTROL_DATA_IN;
4423 			break;
4424 		case DMA_NONE:
4425 			control |= IOACCEL1_CONTROL_NODATAXFER;
4426 			break;
4427 		default:
4428 			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4429 			cmd->sc_data_direction);
4430 			BUG();
4431 			break;
4432 		}
4433 	} else {
4434 		control |= IOACCEL1_CONTROL_NODATAXFER;
4435 	}
4436 
4437 	c->Header.SGList = use_sg;
4438 	/* Fill out the command structure to submit */
4439 	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4440 	cp->transfer_len = cpu_to_le32(total_len);
4441 	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4442 			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4443 	cp->control = cpu_to_le32(control);
4444 	memcpy(cp->CDB, cdb, cdb_len);
4445 	memcpy(cp->CISS_LUN, scsi3addr, 8);
4446 	/* Tag was already set at init time. */
4447 	enqueue_cmd_and_start_io(h, c);
4448 	return 0;
4449 }
4450 
4451 /*
4452  * Queue a command directly to a device behind the controller using the
4453  * I/O accelerator path.
4454  */
4455 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4456 	struct CommandList *c)
4457 {
4458 	struct scsi_cmnd *cmd = c->scsi_cmd;
4459 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4460 
4461 	c->phys_disk = dev;
4462 
4463 	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4464 		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4465 }
4466 
4467 /*
4468  * Set encryption parameters for the ioaccel2 request
4469  */
4470 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4471 	struct CommandList *c, struct io_accel2_cmd *cp)
4472 {
4473 	struct scsi_cmnd *cmd = c->scsi_cmd;
4474 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4475 	struct raid_map_data *map = &dev->raid_map;
4476 	u64 first_block;
4477 
4478 	/* Are we doing encryption on this device */
4479 	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4480 		return;
4481 	/* Set the data encryption key index. */
4482 	cp->dekindex = map->dekindex;
4483 
4484 	/* Set the encryption enable flag, encoded into direction field. */
4485 	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4486 
4487 	/* Set encryption tweak values based on logical block address
4488 	 * If block size is 512, tweak value is LBA.
4489 	 * For other block sizes, tweak is (LBA * block size)/ 512)
4490 	 */
4491 	switch (cmd->cmnd[0]) {
4492 	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4493 	case WRITE_6:
4494 	case READ_6:
4495 		first_block = get_unaligned_be16(&cmd->cmnd[2]);
4496 		break;
4497 	case WRITE_10:
4498 	case READ_10:
4499 	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4500 	case WRITE_12:
4501 	case READ_12:
4502 		first_block = get_unaligned_be32(&cmd->cmnd[2]);
4503 		break;
4504 	case WRITE_16:
4505 	case READ_16:
4506 		first_block = get_unaligned_be64(&cmd->cmnd[2]);
4507 		break;
4508 	default:
4509 		dev_err(&h->pdev->dev,
4510 			"ERROR: %s: size (0x%x) not supported for encryption\n",
4511 			__func__, cmd->cmnd[0]);
4512 		BUG();
4513 		break;
4514 	}
4515 
4516 	if (le32_to_cpu(map->volume_blk_size) != 512)
4517 		first_block = first_block *
4518 				le32_to_cpu(map->volume_blk_size)/512;
4519 
4520 	cp->tweak_lower = cpu_to_le32(first_block);
4521 	cp->tweak_upper = cpu_to_le32(first_block >> 32);
4522 }
4523 
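/*
 * Build and submit a mode-2 I/O accelerator request, chaining the SG
 * list and setting encryption parameters when needed.
 */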
4524 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4525 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4526 	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4527 {
4528 	struct scsi_cmnd *cmd = c->scsi_cmd;
4529 	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4530 	struct ioaccel2_sg_element *curr_sg;
4531 	int use_sg, i;
4532 	struct scatterlist *sg;
4533 	u64 addr64;
4534 	u32 len;
4535 	u32 total_len = 0;
4536 
4537 	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4538 
4539 	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4540 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4541 		return IO_ACCEL_INELIGIBLE;
4542 	}
4543 
4544 	c->cmd_type = CMD_IOACCEL2;
4545 	/* Adjust the DMA address to point to the accelerated command buffer */
4546 	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4547 				(c->cmdindex * sizeof(*cp));
4548 	BUG_ON(c->busaddr & 0x0000007F);
4549 
4550 	memset(cp, 0, sizeof(*cp));
4551 	cp->IU_type = IOACCEL2_IU_TYPE;
4552 
4553 	use_sg = scsi_dma_map(cmd);
4554 	if (use_sg < 0) {
4555 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4556 		return use_sg;
4557 	}
4558 
4559 	if (use_sg) {
4560 		curr_sg = cp->sg;
4561 		if (use_sg > h->ioaccel_maxsg) {
4562 			addr64 = le64_to_cpu(
4563 				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4564 			curr_sg->address = cpu_to_le64(addr64);
4565 			curr_sg->length = 0;
4566 			curr_sg->reserved[0] = 0;
4567 			curr_sg->reserved[1] = 0;
4568 			curr_sg->reserved[2] = 0;
4569 			curr_sg->chain_indicator = IOACCEL2_CHAIN;
4570 
4571 			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4572 		}
4573 		scsi_for_each_sg(cmd, sg, use_sg, i) {
4574 			addr64 = (u64) sg_dma_address(sg);
4575 			len  = sg_dma_len(sg);
4576 			total_len += len;
4577 			curr_sg->address = cpu_to_le64(addr64);
4578 			curr_sg->length = cpu_to_le32(len);
4579 			curr_sg->reserved[0] = 0;
4580 			curr_sg->reserved[1] = 0;
4581 			curr_sg->reserved[2] = 0;
4582 			curr_sg->chain_indicator = 0;
4583 			curr_sg++;
4584 		}
4585 
4586 		/*
4587 		 * Set the last s/g element bit
4588 		 */
4589 		(curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
4590 
4591 		switch (cmd->sc_data_direction) {
4592 		case DMA_TO_DEVICE:
4593 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4594 			cp->direction |= IOACCEL2_DIR_DATA_OUT;
4595 			break;
4596 		case DMA_FROM_DEVICE:
4597 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4598 			cp->direction |= IOACCEL2_DIR_DATA_IN;
4599 			break;
4600 		case DMA_NONE:
4601 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4602 			cp->direction |= IOACCEL2_DIR_NO_DATA;
4603 			break;
4604 		default:
4605 			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4606 				cmd->sc_data_direction);
4607 			BUG();
4608 			break;
4609 		}
4610 	} else {
4611 		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4612 		cp->direction |= IOACCEL2_DIR_NO_DATA;
4613 	}
4614 
4615 	/* Set encryption parameters, if necessary */
4616 	set_encrypt_ioaccel2(h, c, cp);
4617 
4618 	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4619 	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4620 	memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4621 
4622 	cp->data_len = cpu_to_le32(total_len);
4623 	cp->err_ptr = cpu_to_le64(c->busaddr +
4624 			offsetof(struct io_accel2_cmd, error_data));
4625 	cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4626 
4627 	/* fill in sg elements */
4628 	if (use_sg > h->ioaccel_maxsg) {
4629 		cp->sg_count = 1;
4630 		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4631 		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4632 			atomic_dec(&phys_disk->ioaccel_cmds_out);
4633 			scsi_dma_unmap(cmd);
4634 			return -1;
4635 		}
4636 	} else
4637 		cp->sg_count = (u8) use_sg;
4638 
4639 	enqueue_cmd_and_start_io(h, c);
4640 	return 0;
4641 }
4642 
4643 /*
4644  * Queue a command to the correct I/O accelerator path.
4645  */
4646 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4647 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4648 	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4649 {
4650 	/* Try to honor the device's queue depth */
4651 	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4652 					phys_disk->queue_depth) {
4653 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4654 		return IO_ACCEL_INELIGIBLE;
4655 	}
4656 	if (h->transMethod & CFGTBL_Trans_io_accel1)
4657 		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
4658 						cdb, cdb_len, scsi3addr,
4659 						phys_disk);
4660 	else
4661 		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
4662 						cdb, cdb_len, scsi3addr,
4663 						phys_disk);
4664 }
4665 
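/*
 * Adjust *map_index to point at a data disk in the mirror group
 * selected by offload_to_mirror.
 */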
4666 static void raid_map_helper(struct raid_map_data *map,
4667 		int offload_to_mirror, u32 *map_index, u32 *current_group)
4668 {
4669 	if (offload_to_mirror == 0)  {
4670 		/* use physical disk in the first mirrored group. */
4671 		*map_index %= le16_to_cpu(map->data_disks_per_row);
4672 		return;
4673 	}
4674 	do {
4675 		/* determine mirror group that *map_index indicates */
4676 		*current_group = *map_index /
4677 			le16_to_cpu(map->data_disks_per_row);
4678 		if (offload_to_mirror == *current_group)
4679 			continue;
4680 		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4681 			/* select map index from next group */
4682 			*map_index += le16_to_cpu(map->data_disks_per_row);
4683 			(*current_group)++;
4684 		} else {
4685 			/* select map index from first group */
4686 			*map_index %= le16_to_cpu(map->data_disks_per_row);
4687 			*current_group = 0;
4688 		}
4689 	} while (offload_to_mirror != *current_group);
4690 }
4691 
4692 /*
4693  * Attempt to perform offload RAID mapping for a logical volume I/O.
4694  */
4695 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4696 	struct CommandList *c)
4697 {
4698 	struct scsi_cmnd *cmd = c->scsi_cmd;
4699 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4700 	struct raid_map_data *map = &dev->raid_map;
4701 	struct raid_map_disk_data *dd = &map->data[0];
4702 	int is_write = 0;
4703 	u32 map_index;
4704 	u64 first_block, last_block;
4705 	u32 block_cnt;
4706 	u32 blocks_per_row;
4707 	u64 first_row, last_row;
4708 	u32 first_row_offset, last_row_offset;
4709 	u32 first_column, last_column;
4710 	u64 r0_first_row, r0_last_row;
4711 	u32 r5or6_blocks_per_row;
4712 	u64 r5or6_first_row, r5or6_last_row;
4713 	u32 r5or6_first_row_offset, r5or6_last_row_offset;
4714 	u32 r5or6_first_column, r5or6_last_column;
4715 	u32 total_disks_per_row;
4716 	u32 stripesize;
4717 	u32 first_group, last_group, current_group;
4718 	u32 map_row;
4719 	u32 disk_handle;
4720 	u64 disk_block;
4721 	u32 disk_block_cnt;
4722 	u8 cdb[16];
4723 	u8 cdb_len;
4724 	u16 strip_size;
4725 #if BITS_PER_LONG == 32
4726 	u64 tmpdiv;
4727 #endif
4728 	int offload_to_mirror;
4729 
4730 	/* check for valid opcode, get LBA and block count */
4731 	switch (cmd->cmnd[0]) {
4732 	case WRITE_6:
4733 		is_write = 1;
4734 	case READ_6:
4735 		first_block = get_unaligned_be16(&cmd->cmnd[2]);
4736 		block_cnt = cmd->cmnd[4];
4737 		if (block_cnt == 0)
4738 			block_cnt = 256;
4739 		break;
4740 	case WRITE_10:
4741 		is_write = 1;
4742 	case READ_10:
4743 		first_block =
4744 			(((u64) cmd->cmnd[2]) << 24) |
4745 			(((u64) cmd->cmnd[3]) << 16) |
4746 			(((u64) cmd->cmnd[4]) << 8) |
4747 			cmd->cmnd[5];
4748 		block_cnt =
4749 			(((u32) cmd->cmnd[7]) << 8) |
4750 			cmd->cmnd[8];
4751 		break;
4752 	case WRITE_12:
4753 		is_write = 1;
4754 	case READ_12:
4755 		first_block =
4756 			(((u64) cmd->cmnd[2]) << 24) |
4757 			(((u64) cmd->cmnd[3]) << 16) |
4758 			(((u64) cmd->cmnd[4]) << 8) |
4759 			cmd->cmnd[5];
4760 		block_cnt =
4761 			(((u32) cmd->cmnd[6]) << 24) |
4762 			(((u32) cmd->cmnd[7]) << 16) |
4763 			(((u32) cmd->cmnd[8]) << 8) |
4764 			cmd->cmnd[9];
4765 		break;
4766 	case WRITE_16:
4767 		is_write = 1;
4768 	case READ_16:
4769 		first_block =
4770 			(((u64) cmd->cmnd[2]) << 56) |
4771 			(((u64) cmd->cmnd[3]) << 48) |
4772 			(((u64) cmd->cmnd[4]) << 40) |
4773 			(((u64) cmd->cmnd[5]) << 32) |
4774 			(((u64) cmd->cmnd[6]) << 24) |
4775 			(((u64) cmd->cmnd[7]) << 16) |
4776 			(((u64) cmd->cmnd[8]) << 8) |
4777 			cmd->cmnd[9];
4778 		block_cnt =
4779 			(((u32) cmd->cmnd[10]) << 24) |
4780 			(((u32) cmd->cmnd[11]) << 16) |
4781 			(((u32) cmd->cmnd[12]) << 8) |
4782 			cmd->cmnd[13];
4783 		break;
4784 	default:
4785 		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4786 	}
4787 	last_block = first_block + block_cnt - 1;
4788 
4789 	/* check for write to non-RAID-0 */
4790 	if (is_write && dev->raid_level != 0)
4791 		return IO_ACCEL_INELIGIBLE;
4792 
4793 	/* check for invalid block or wraparound */
4794 	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4795 		last_block < first_block)
4796 		return IO_ACCEL_INELIGIBLE;
4797 
4798 	/* calculate stripe information for the request */
4799 	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4800 				le16_to_cpu(map->strip_size);
4801 	strip_size = le16_to_cpu(map->strip_size);
4802 #if BITS_PER_LONG == 32
4803 	tmpdiv = first_block;
4804 	(void) do_div(tmpdiv, blocks_per_row);
4805 	first_row = tmpdiv;
4806 	tmpdiv = last_block;
4807 	(void) do_div(tmpdiv, blocks_per_row);
4808 	last_row = tmpdiv;
4809 	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4810 	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4811 	tmpdiv = first_row_offset;
4812 	(void) do_div(tmpdiv, strip_size);
4813 	first_column = tmpdiv;
4814 	tmpdiv = last_row_offset;
4815 	(void) do_div(tmpdiv, strip_size);
4816 	last_column = tmpdiv;
4817 #else
4818 	first_row = first_block / blocks_per_row;
4819 	last_row = last_block / blocks_per_row;
4820 	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4821 	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4822 	first_column = first_row_offset / strip_size;
4823 	last_column = last_row_offset / strip_size;
4824 #endif
4825 
4826 	/* if this isn't a single row/column then give to the controller */
4827 	if ((first_row != last_row) || (first_column != last_column))
4828 		return IO_ACCEL_INELIGIBLE;
4829 
4830 	/* proceeding with driver mapping */
4831 	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4832 				le16_to_cpu(map->metadata_disks_per_row);
4833 	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4834 				le16_to_cpu(map->row_cnt);
4835 	map_index = (map_row * total_disks_per_row) + first_column;
4836 
4837 	switch (dev->raid_level) {
4838 	case HPSA_RAID_0:
4839 		break; /* nothing special to do */
4840 	case HPSA_RAID_1:
4841 		/* Handles load balance across RAID 1 members.
4842 		 * (2-drive R1 and R10 with even # of drives.)
4843 		 * Appropriate for SSDs, not optimal for HDDs
4844 		 */
4845 		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
4846 		if (dev->offload_to_mirror)
4847 			map_index += le16_to_cpu(map->data_disks_per_row);
4848 		dev->offload_to_mirror = !dev->offload_to_mirror;
4849 		break;
4850 	case HPSA_RAID_ADM:
4851 		/* Handles N-way mirrors  (R1-ADM)
4852 		 * and R10 with # of drives divisible by 3.)
4853 		 * and R10 with # of drives divisible by 3.
4854 		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
4855 
4856 		offload_to_mirror = dev->offload_to_mirror;
4857 		raid_map_helper(map, offload_to_mirror,
4858 				&map_index, &current_group);
4859 		/* set mirror group to use next time */
4860 		offload_to_mirror =
4861 			(offload_to_mirror >=
4862 			le16_to_cpu(map->layout_map_count) - 1)
4863 			? 0 : offload_to_mirror + 1;
4864 		dev->offload_to_mirror = offload_to_mirror;
4865 		/* Avoid direct use of dev->offload_to_mirror within this
4866 		 * function since multiple threads might simultaneously
4867 		 * increment it beyond the range of dev->layout_map_count -1.
4868 		 */
4869 		break;
4870 	case HPSA_RAID_5:
4871 	case HPSA_RAID_6:
4872 		if (le16_to_cpu(map->layout_map_count) <= 1)
4873 			break;
4874 
4875 		/* Verify first and last block are in same RAID group */
4876 		r5or6_blocks_per_row =
4877 			le16_to_cpu(map->strip_size) *
4878 			le16_to_cpu(map->data_disks_per_row);
4879 		BUG_ON(r5or6_blocks_per_row == 0);
4880 		stripesize = r5or6_blocks_per_row *
4881 			le16_to_cpu(map->layout_map_count);
4882 #if BITS_PER_LONG == 32
4883 		tmpdiv = first_block;
4884 		first_group = do_div(tmpdiv, stripesize);
4885 		tmpdiv = first_group;
4886 		(void) do_div(tmpdiv, r5or6_blocks_per_row);
4887 		first_group = tmpdiv;
4888 		tmpdiv = last_block;
4889 		last_group = do_div(tmpdiv, stripesize);
4890 		tmpdiv = last_group;
4891 		(void) do_div(tmpdiv, r5or6_blocks_per_row);
4892 		last_group = tmpdiv;
4893 #else
4894 		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4895 		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
4896 #endif
4897 		if (first_group != last_group)
4898 			return IO_ACCEL_INELIGIBLE;
4899 
4900 		/* Verify request is in a single row of RAID 5/6 */
4901 #if BITS_PER_LONG == 32
4902 		tmpdiv = first_block;
4903 		(void) do_div(tmpdiv, stripesize);
4904 		first_row = r5or6_first_row = r0_first_row = tmpdiv;
4905 		tmpdiv = last_block;
4906 		(void) do_div(tmpdiv, stripesize);
4907 		r5or6_last_row = r0_last_row = tmpdiv;
4908 #else
4909 		first_row = r5or6_first_row = r0_first_row =
4910 						first_block / stripesize;
4911 		r5or6_last_row = r0_last_row = last_block / stripesize;
4912 #endif
4913 		if (r5or6_first_row != r5or6_last_row)
4914 			return IO_ACCEL_INELIGIBLE;
4915 
4916 
4917 		/* Verify request is in a single column */
4918 #if BITS_PER_LONG == 32
4919 		tmpdiv = first_block;
4920 		first_row_offset = do_div(tmpdiv, stripesize);
4921 		tmpdiv = first_row_offset;
4922 		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4923 		r5or6_first_row_offset = first_row_offset;
4924 		tmpdiv = last_block;
4925 		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4926 		tmpdiv = r5or6_last_row_offset;
4927 		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4928 		tmpdiv = r5or6_first_row_offset;
4929 		(void) do_div(tmpdiv, map->strip_size);
4930 		first_column = r5or6_first_column = tmpdiv;
4931 		tmpdiv = r5or6_last_row_offset;
4932 		(void) do_div(tmpdiv, map->strip_size);
4933 		r5or6_last_column = tmpdiv;
4934 #else
4935 		first_row_offset = r5or6_first_row_offset =
4936 			(u32)((first_block % stripesize) %
4937 						r5or6_blocks_per_row);
4938 
4939 		r5or6_last_row_offset =
4940 			(u32)((last_block % stripesize) %
4941 						r5or6_blocks_per_row);
4942 
4943 		first_column = r5or6_first_column =
4944 			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
4945 		r5or6_last_column =
4946 			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
4947 #endif
4948 		if (r5or6_first_column != r5or6_last_column)
4949 			return IO_ACCEL_INELIGIBLE;
4950 
4951 		/* Request is eligible */
4952 		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4953 			le16_to_cpu(map->row_cnt);
4954 
4955 		map_index = (first_group *
4956 			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
4957 			(map_row * total_disks_per_row) + first_column;
4958 		break;
4959 	default:
4960 		return IO_ACCEL_INELIGIBLE;
4961 	}
4962 
4963 	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4964 		return IO_ACCEL_INELIGIBLE;
4965 
4966 	c->phys_disk = dev->phys_disk[map_index];
4967 
4968 	disk_handle = dd[map_index].ioaccel_handle;
4969 	disk_block = le64_to_cpu(map->disk_starting_blk) +
4970 			first_row * le16_to_cpu(map->strip_size) +
4971 			(first_row_offset - first_column *
4972 			le16_to_cpu(map->strip_size));
4973 	disk_block_cnt = block_cnt;
4974 
4975 	/* handle differing logical/physical block sizes */
4976 	if (map->phys_blk_shift) {
4977 		disk_block <<= map->phys_blk_shift;
4978 		disk_block_cnt <<= map->phys_blk_shift;
4979 	}
4980 	BUG_ON(disk_block_cnt > 0xffff);
4981 
4982 	/* build the new CDB for the physical disk I/O */
4983 	if (disk_block > 0xffffffff) {
4984 		cdb[0] = is_write ? WRITE_16 : READ_16;
4985 		cdb[1] = 0;
4986 		cdb[2] = (u8) (disk_block >> 56);
4987 		cdb[3] = (u8) (disk_block >> 48);
4988 		cdb[4] = (u8) (disk_block >> 40);
4989 		cdb[5] = (u8) (disk_block >> 32);
4990 		cdb[6] = (u8) (disk_block >> 24);
4991 		cdb[7] = (u8) (disk_block >> 16);
4992 		cdb[8] = (u8) (disk_block >> 8);
4993 		cdb[9] = (u8) (disk_block);
4994 		cdb[10] = (u8) (disk_block_cnt >> 24);
4995 		cdb[11] = (u8) (disk_block_cnt >> 16);
4996 		cdb[12] = (u8) (disk_block_cnt >> 8);
4997 		cdb[13] = (u8) (disk_block_cnt);
4998 		cdb[14] = 0;
4999 		cdb[15] = 0;
5000 		cdb_len = 16;
5001 	} else {
5002 		cdb[0] = is_write ? WRITE_10 : READ_10;
5003 		cdb[1] = 0;
5004 		cdb[2] = (u8) (disk_block >> 24);
5005 		cdb[3] = (u8) (disk_block >> 16);
5006 		cdb[4] = (u8) (disk_block >> 8);
5007 		cdb[5] = (u8) (disk_block);
5008 		cdb[6] = 0;
5009 		cdb[7] = (u8) (disk_block_cnt >> 8);
5010 		cdb[8] = (u8) (disk_block_cnt);
5011 		cdb[9] = 0;
5012 		cdb_len = 10;
5013 	}
5014 	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5015 						dev->scsi3addr,
5016 						dev->phys_disk[map_index]);
5017 }
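
/*
 * Worked example of the stripe math above (hypothetical map geometry:
 * strip_size = 128, data_disks_per_row = 4, so blocks_per_row = 512):
 * a READ_10 with first_block = 1000 and block_cnt = 8 gives
 * last_block = 1007, first_row = last_row = 1000 / 512 = 1,
 * first_row_offset = 488, last_row_offset = 495, and
 * first_column = last_column = 488 / 128 = 3, so the request fits within a
 * single strip and stays eligible; the physical-disk LBA then becomes
 * disk_starting_blk + 1 * 128 + (488 - 3 * 128) = disk_starting_blk + 232.
 */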
5018 
5019 /*
5020  * Submit commands down the "normal" RAID stack path
5021  * All callers to hpsa_ciss_submit must check lockup_detected
5022  * beforehand, optionally before and again after calling cmd_alloc.
5023  */
5024 static int hpsa_ciss_submit(struct ctlr_info *h,
5025 	struct CommandList *c, struct scsi_cmnd *cmd,
5026 	unsigned char scsi3addr[])
5027 {
5028 	cmd->host_scribble = (unsigned char *) c;
5029 	c->cmd_type = CMD_SCSI;
5030 	c->scsi_cmd = cmd;
5031 	c->Header.ReplyQueue = 0;  /* unused in simple mode */
5032 	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
5033 	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5034 
5035 	/* Fill in the request block... */
5036 
5037 	c->Request.Timeout = 0;
5038 	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5039 	c->Request.CDBLen = cmd->cmd_len;
5040 	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5041 	switch (cmd->sc_data_direction) {
5042 	case DMA_TO_DEVICE:
5043 		c->Request.type_attr_dir =
5044 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5045 		break;
5046 	case DMA_FROM_DEVICE:
5047 		c->Request.type_attr_dir =
5048 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5049 		break;
5050 	case DMA_NONE:
5051 		c->Request.type_attr_dir =
5052 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5053 		break;
5054 	case DMA_BIDIRECTIONAL:
5055 		/* This can happen if a buggy application does a scsi passthru
5056 		 * and sets both inlen and outlen to non-zero. ( see
5057 		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
5058 		 */
5059 
5060 		c->Request.type_attr_dir =
5061 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5062 		/* This is technically wrong, and hpsa controllers should
5063 		 * reject it with CMD_INVALID, which is the most correct
5064 		 * response, but non-fibre backends appear to let it
5065 		 * slide by, and give the same results as if this field
5066 		 * were set correctly.  Either way is acceptable for
5067 		 * our purposes here.
5068 		 */
5069 
5070 		break;
5071 
5072 	default:
5073 		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5074 			cmd->sc_data_direction);
5075 		BUG();
5076 		break;
5077 	}
5078 
5079 	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5080 		hpsa_cmd_resolve_and_free(h, c);
5081 		return SCSI_MLQUEUE_HOST_BUSY;
5082 	}
5083 	enqueue_cmd_and_start_io(h, c);
5084 	/* the cmd'll come back via intr handler in complete_scsi_command()  */
5085 	return 0;
5086 }
5087 
5088 static void hpsa_cmd_init(struct ctlr_info *h, int index,
5089 				struct CommandList *c)
5090 {
5091 	dma_addr_t cmd_dma_handle, err_dma_handle;
5092 
5093 	/* Zero out all of commandlist except the last field, refcount */
5094 	memset(c, 0, offsetof(struct CommandList, refcount));
5095 	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5096 	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5097 	c->err_info = h->errinfo_pool + index;
5098 	memset(c->err_info, 0, sizeof(*c->err_info));
5099 	err_dma_handle = h->errinfo_pool_dhandle
5100 	    + index * sizeof(*c->err_info);
5101 	c->cmdindex = index;
5102 	c->busaddr = (u32) cmd_dma_handle;
5103 	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5104 	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5105 	c->h = h;
5106 	c->scsi_cmd = SCSI_CMD_IDLE;
5107 }
5108 
5109 static void hpsa_preinitialize_commands(struct ctlr_info *h)
5110 {
5111 	int i;
5112 
5113 	for (i = 0; i < h->nr_cmds; i++) {
5114 		struct CommandList *c = h->cmd_pool + i;
5115 
5116 		hpsa_cmd_init(h, i, c);
5117 		atomic_set(&c->refcount, 0);
5118 	}
5119 }
5120 
5121 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5122 				struct CommandList *c)
5123 {
5124 	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5125 
5126 	BUG_ON(c->cmdindex != index);
5127 
5128 	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5129 	memset(c->err_info, 0, sizeof(*c->err_info));
5130 	c->busaddr = (u32) cmd_dma_handle;
5131 }
5132 
5133 static int hpsa_ioaccel_submit(struct ctlr_info *h,
5134 		struct CommandList *c, struct scsi_cmnd *cmd,
5135 		unsigned char *scsi3addr)
5136 {
5137 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5138 	int rc = IO_ACCEL_INELIGIBLE;
5139 
5140 	cmd->host_scribble = (unsigned char *) c;
5141 
5142 	if (dev->offload_enabled) {
5143 		hpsa_cmd_init(h, c->cmdindex, c);
5144 		c->cmd_type = CMD_SCSI;
5145 		c->scsi_cmd = cmd;
5146 		rc = hpsa_scsi_ioaccel_raid_map(h, c);
5147 		if (rc < 0)     /* scsi_dma_map failed. */
5148 			rc = SCSI_MLQUEUE_HOST_BUSY;
5149 	} else if (dev->hba_ioaccel_enabled) {
5150 		hpsa_cmd_init(h, c->cmdindex, c);
5151 		c->cmd_type = CMD_SCSI;
5152 		c->scsi_cmd = cmd;
5153 		rc = hpsa_scsi_ioaccel_direct_map(h, c);
5154 		if (rc < 0)     /* scsi_dma_map failed. */
5155 			rc = SCSI_MLQUEUE_HOST_BUSY;
5156 	}
5157 	return rc;
5158 }
5159 
5160 static void hpsa_command_resubmit_worker(struct work_struct *work)
5161 {
5162 	struct scsi_cmnd *cmd;
5163 	struct hpsa_scsi_dev_t *dev;
5164 	struct CommandList *c = container_of(work, struct CommandList, work);
5165 
5166 	cmd = c->scsi_cmd;
5167 	dev = cmd->device->hostdata;
5168 	if (!dev) {
5169 		cmd->result = DID_NO_CONNECT << 16;
5170 		return hpsa_cmd_free_and_done(c->h, c, cmd);
5171 	}
5172 	if (c->reset_pending)
5173 		return hpsa_cmd_resolve_and_free(c->h, c);
5174 	if (c->abort_pending)
5175 		return hpsa_cmd_abort_and_free(c->h, c, cmd);
5176 	if (c->cmd_type == CMD_IOACCEL2) {
5177 		struct ctlr_info *h = c->h;
5178 		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5179 		int rc;
5180 
5181 		if (c2->error_data.serv_response ==
5182 				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5183 			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
5184 			if (rc == 0)
5185 				return;
5186 			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5187 				/*
5188 				 * If we get here, it means dma mapping failed.
5189 				 * Try again via scsi mid layer, which will
5190 				 * then get SCSI_MLQUEUE_HOST_BUSY.
5191 				 */
5192 				cmd->result = DID_IMM_RETRY << 16;
5193 				return hpsa_cmd_free_and_done(h, c, cmd);
5194 			}
5195 			/* else, fall thru and resubmit down CISS path */
5196 		}
5197 	}
5198 	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5199 	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
5200 		/*
5201 		 * If we get here, it means dma mapping failed. Try
5202 		 * again via scsi mid layer, which will then get
5203 		 * SCSI_MLQUEUE_HOST_BUSY.
5204 		 *
5205 		 * hpsa_ciss_submit will have already freed c
5206 		 * if it encountered a dma mapping failure.
5207 		 */
5208 		cmd->result = DID_IMM_RETRY << 16;
5209 		cmd->scsi_done(cmd);
5210 	}
5211 }
5212 
5213 /* Runs without the struct Scsi_Host->host_lock held (host_lock-less mode) */
5214 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5215 {
5216 	struct ctlr_info *h;
5217 	struct hpsa_scsi_dev_t *dev;
5218 	unsigned char scsi3addr[8];
5219 	struct CommandList *c;
5220 	int rc = 0;
5221 
5222 	/* Get the ptr to our adapter structure out of cmd->host. */
5223 	h = sdev_to_hba(cmd->device);
5224 
5225 	BUG_ON(cmd->request->tag < 0);
5226 
5227 	dev = cmd->device->hostdata;
5228 	if (!dev) {
5229 		cmd->result = DID_NO_CONNECT << 16;
5230 		cmd->scsi_done(cmd);
5231 		return 0;
5232 	}
5233 
5234 	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
5235 
5236 	if (unlikely(lockup_detected(h))) {
5237 		cmd->result = DID_NO_CONNECT << 16;
5238 		cmd->scsi_done(cmd);
5239 		return 0;
5240 	}
5241 	c = cmd_tagged_alloc(h, cmd);
5242 
5243 	/*
5244 	 * This is necessary because the SML doesn't zero out this field during
5245 	 * error recovery.
5246 	 */
5247 	cmd->result = 0;
5248 
5249 	/*
5250 	 * Call alternate submit routine for I/O accelerated commands.
5251 	 * Retries always go down the normal I/O path.
5252 	 */
5253 	if (likely(cmd->retries == 0 &&
5254 		cmd->request->cmd_type == REQ_TYPE_FS &&
5255 		h->acciopath_status)) {
5256 		rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5257 		if (rc == 0)
5258 			return 0;
5259 		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5260 			hpsa_cmd_resolve_and_free(h, c);
5261 			return SCSI_MLQUEUE_HOST_BUSY;
5262 		}
5263 	}
5264 	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
5265 }
5266 
5267 static void hpsa_scan_complete(struct ctlr_info *h)
5268 {
5269 	unsigned long flags;
5270 
5271 	spin_lock_irqsave(&h->scan_lock, flags);
5272 	h->scan_finished = 1;
5273 	wake_up(&h->scan_wait_queue);
5274 	spin_unlock_irqrestore(&h->scan_lock, flags);
5275 }
5276 
5277 static void hpsa_scan_start(struct Scsi_Host *sh)
5278 {
5279 	struct ctlr_info *h = shost_to_hba(sh);
5280 	unsigned long flags;
5281 
5282 	/*
5283 	 * Don't let rescans be initiated on a controller known to be locked
5284 	 * up.  If the controller locks up *during* a rescan, that thread is
5285 	 * probably hosed, but at least we can prevent new rescan threads from
5286 	 * piling up on a locked up controller.
5287 	 */
5288 	if (unlikely(lockup_detected(h)))
5289 		return hpsa_scan_complete(h);
5290 
5291 	/*
5292 	 * If a scan is already waiting to run, no need to add another
5293 	 */
5294 	spin_lock_irqsave(&h->scan_lock, flags);
5295 	if (h->scan_waiting) {
5296 		spin_unlock_irqrestore(&h->scan_lock, flags);
5297 		return;
5298 	}
5299 
5300 	spin_unlock_irqrestore(&h->scan_lock, flags);
5301 
5302 	/* wait until any scan already in progress is finished. */
5303 	while (1) {
5304 		spin_lock_irqsave(&h->scan_lock, flags);
5305 		if (h->scan_finished)
5306 			break;
5307 		h->scan_waiting = 1;
5308 		spin_unlock_irqrestore(&h->scan_lock, flags);
5309 		wait_event(h->scan_wait_queue, h->scan_finished);
5310 		/* Note: We don't need to worry about a race between this
5311 		 * thread and driver unload because the midlayer will
5312 		 * have incremented the reference count, so unload won't
5313 		 * happen if we're in here.
5314 		 */
5315 	}
5316 	h->scan_finished = 0; /* mark scan as in progress */
5317 	h->scan_waiting = 0;
5318 	spin_unlock_irqrestore(&h->scan_lock, flags);
5319 
5320 	if (unlikely(lockup_detected(h)))
5321 		return hpsa_scan_complete(h);
5322 
5323 	hpsa_update_scsi_devices(h);
5324 
5325 	hpsa_scan_complete(h);
5326 }
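
/*
 * Scan serialization, sketched as a scenario: if thread A is mid-scan
 * (scan_finished == 0) and thread B calls hpsa_scan_start(), B sets
 * scan_waiting and sleeps on scan_wait_queue; a thread C arriving while B
 * waits sees scan_waiting == 1 and returns immediately, since B's pending
 * scan will pick up any later changes anyway.  When A calls
 * hpsa_scan_complete(), B wakes, clears scan_waiting, marks a scan in
 * progress again and runs hpsa_update_scsi_devices().
 */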
5327 
5328 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5329 {
5330 	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5331 
5332 	if (!logical_drive)
5333 		return -ENODEV;
5334 
5335 	if (qdepth < 1)
5336 		qdepth = 1;
5337 	else if (qdepth > logical_drive->queue_depth)
5338 		qdepth = logical_drive->queue_depth;
5339 
5340 	return scsi_change_queue_depth(sdev, qdepth);
5341 }
5342 
5343 static int hpsa_scan_finished(struct Scsi_Host *sh,
5344 	unsigned long elapsed_time)
5345 {
5346 	struct ctlr_info *h = shost_to_hba(sh);
5347 	unsigned long flags;
5348 	int finished;
5349 
5350 	spin_lock_irqsave(&h->scan_lock, flags);
5351 	finished = h->scan_finished;
5352 	spin_unlock_irqrestore(&h->scan_lock, flags);
5353 	return finished;
5354 }
5355 
5356 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5357 {
5358 	struct Scsi_Host *sh;
5359 
5360 	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5361 	if (sh == NULL) {
5362 		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5363 		return -ENOMEM;
5364 	}
5365 
5366 	sh->io_port = 0;
5367 	sh->n_io_port = 0;
5368 	sh->this_id = -1;
5369 	sh->max_channel = 3;
5370 	sh->max_cmd_len = MAX_COMMAND_SIZE;
5371 	sh->max_lun = HPSA_MAX_LUN;
5372 	sh->max_id = HPSA_MAX_LUN;
5373 	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5374 	sh->cmd_per_lun = sh->can_queue;
5375 	sh->sg_tablesize = h->maxsgentries;
5376 	sh->transportt = hpsa_sas_transport_template;
5377 	sh->hostdata[0] = (unsigned long) h;
5378 	sh->irq = h->intr[h->intr_mode];
5379 	sh->unique_id = sh->irq;
5380 
5381 	h->scsi_host = sh;
5382 	return 0;
5383 }
5384 
5385 static int hpsa_scsi_add_host(struct ctlr_info *h)
5386 {
5387 	int rv;
5388 
5389 	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5390 	if (rv) {
5391 		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5392 		return rv;
5393 	}
5394 	scsi_scan_host(h->scsi_host);
5395 	return 0;
5396 }
5397 
5398 /*
5399  * The block layer has already gone to the trouble of picking out a unique,
5400  * small-integer tag for this request.  We use an offset from that value as
5401  * an index to select our command block.  (The offset allows us to reserve the
5402  * low-numbered entries for our own uses.)
5403  */
5404 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5405 {
5406 	int idx = scmd->request->tag;
5407 
5408 	if (idx < 0)
5409 		return idx;
5410 
5411 	/* Offset to leave space for internal cmds. */
5412 	return idx + HPSA_NRESERVED_CMDS;
5413 }
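
/*
 * Example of the mapping above: a block-layer tag of 0 selects command block
 * HPSA_NRESERVED_CMDS, tag 1 selects HPSA_NRESERVED_CMDS + 1, and so on;
 * indexes 0 .. HPSA_NRESERVED_CMDS - 1 stay reserved for driver-internal
 * commands allocated through cmd_alloc().
 */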
5414 
5415 /*
5416  * Send a TEST_UNIT_READY command to the specified LUN using the specified
5417  * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5418  */
5419 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5420 				struct CommandList *c, unsigned char lunaddr[],
5421 				int reply_queue)
5422 {
5423 	int rc;
5424 
5425 	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5426 	(void) fill_cmd(c, TEST_UNIT_READY, h,
5427 			NULL, 0, 0, lunaddr, TYPE_CMD);
5428 	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5429 	if (rc)
5430 		return rc;
5431 	/* no unmap needed here because no data xfer. */
5432 
5433 	/* Check if the unit is already ready. */
5434 	if (c->err_info->CommandStatus == CMD_SUCCESS)
5435 		return 0;
5436 
5437 	/*
5438 	 * The first command sent after reset will receive "unit attention" to
5439 	 * indicate that the LUN has been reset...this is actually what we're
5440 	 * looking for (but, success is good too).
5441 	 */
5442 	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5443 		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5444 			(c->err_info->SenseInfo[2] == NO_SENSE ||
5445 			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5446 		return 0;
5447 
5448 	return 1;
5449 }
5450 
5451 /*
5452  * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5453  * returns zero when the unit is ready, and non-zero when giving up.
5454  */
5455 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5456 				struct CommandList *c,
5457 				unsigned char lunaddr[], int reply_queue)
5458 {
5459 	int rc;
5460 	int count = 0;
5461 	int waittime = 1; /* seconds */
5462 
5463 	/* Send test unit ready until device ready, or give up. */
5464 	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5465 
5466 		/*
5467 		 * Wait for a bit.  do this first, because if we send
5468 		 * the TUR right away, the reset will just abort it.
5469 		 */
5470 		msleep(1000 * waittime);
5471 
5472 		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5473 		if (!rc)
5474 			break;
5475 
5476 		/* Increase wait time with each try, up to a point. */
5477 		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5478 			waittime *= 2;
5479 
5480 		dev_warn(&h->pdev->dev,
5481 			 "waiting %d secs for device to become ready.\n",
5482 			 waittime);
5483 	}
5484 
5485 	return rc;
5486 }
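
/*
 * Retry cadence, for illustration: the sleep doubles on each attempt
 * (1 s, 2 s, 4 s, ...) until it reaches HPSA_MAX_WAIT_INTERVAL_SECS and then
 * stays there, and the loop gives up after HPSA_TUR_RETRY_LIMIT attempts,
 * returning whatever the final TEST UNIT READY attempt reported.
 */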
5487 
5488 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5489 					   unsigned char lunaddr[],
5490 					   int reply_queue)
5491 {
5492 	int first_queue;
5493 	int last_queue;
5494 	int rq;
5495 	int rc = 0;
5496 	struct CommandList *c;
5497 
5498 	c = cmd_alloc(h);
5499 
5500 	/*
5501 	 * If no specific reply queue was requested, then send the TUR
5502 	 * repeatedly, requesting a reply on each reply queue; otherwise execute
5503 	 * the loop exactly once using only the specified queue.
5504 	 */
5505 	if (reply_queue == DEFAULT_REPLY_QUEUE) {
5506 		first_queue = 0;
5507 		last_queue = h->nreply_queues - 1;
5508 	} else {
5509 		first_queue = reply_queue;
5510 		last_queue = reply_queue;
5511 	}
5512 
5513 	for (rq = first_queue; rq <= last_queue; rq++) {
5514 		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5515 		if (rc)
5516 			break;
5517 	}
5518 
5519 	if (rc)
5520 		dev_warn(&h->pdev->dev, "giving up on device.\n");
5521 	else
5522 		dev_warn(&h->pdev->dev, "device is ready.\n");
5523 
5524 	cmd_free(h, c);
5525 	return rc;
5526 }
5527 
5528 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
5529  * complaining.  Doing a host- or bus-reset can't do anything good here.
5530  */
5531 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5532 {
5533 	int rc;
5534 	struct ctlr_info *h;
5535 	struct hpsa_scsi_dev_t *dev;
5536 	u8 reset_type;
5537 	char msg[48];
5538 
5539 	/* find the controller to which the command to be aborted was sent */
5540 	h = sdev_to_hba(scsicmd->device);
5541 	if (h == NULL) /* paranoia */
5542 		return FAILED;
5543 
5544 	if (lockup_detected(h))
5545 		return FAILED;
5546 
5547 	dev = scsicmd->device->hostdata;
5548 	if (!dev) {
5549 		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5550 		return FAILED;
5551 	}
5552 
5553 	/* if controller locked up, we can guarantee command won't complete */
5554 	if (lockup_detected(h)) {
5555 		snprintf(msg, sizeof(msg),
5556 			 "cmd %d RESET FAILED, lockup detected",
5557 			 hpsa_get_cmd_index(scsicmd));
5558 		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5559 		return FAILED;
5560 	}
5561 
5562 	/* this reset request might be the result of a lockup; check */
5563 	if (detect_controller_lockup(h)) {
5564 		snprintf(msg, sizeof(msg),
5565 			 "cmd %d RESET FAILED, new lockup detected",
5566 			 hpsa_get_cmd_index(scsicmd));
5567 		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5568 		return FAILED;
5569 	}
5570 
5571 	/* Do not attempt on controller */
5572 	if (is_hba_lunid(dev->scsi3addr))
5573 		return SUCCESS;
5574 
5575 	if (is_logical_dev_addr_mode(dev->scsi3addr))
5576 		reset_type = HPSA_DEVICE_RESET_MSG;
5577 	else
5578 		reset_type = HPSA_PHYS_TARGET_RESET;
5579 
5580 	sprintf(msg, "resetting %s",
5581 		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
5582 	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5583 
5584 	h->reset_in_progress = 1;
5585 
5586 	/* send a reset to the SCSI LUN which the command was sent to */
5587 	rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
5588 			   DEFAULT_REPLY_QUEUE);
5589 	sprintf(msg, "reset %s %s",
5590 		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
5591 		rc == 0 ? "completed successfully" : "failed");
5592 	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5593 	h->reset_in_progress = 0;
5594 	return rc == 0 ? SUCCESS : FAILED;
5595 }
5596 
5597 static void swizzle_abort_tag(u8 *tag)
5598 {
5599 	u8 original_tag[8];
5600 
5601 	memcpy(original_tag, tag, 8);
5602 	tag[0] = original_tag[3];
5603 	tag[1] = original_tag[2];
5604 	tag[2] = original_tag[1];
5605 	tag[3] = original_tag[0];
5606 	tag[4] = original_tag[7];
5607 	tag[5] = original_tag[6];
5608 	tag[6] = original_tag[5];
5609 	tag[7] = original_tag[4];
5610 }
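
/*
 * Illustrative sketch (hypothetical helper, never called by the driver):
 * swizzle_abort_tag() byte-reverses each 4-byte half of the tag, so
 * 00 01 02 03 04 05 06 07 becomes 03 02 01 00 07 06 05 04, for controllers
 * flagged with needs_abort_tags_swizzled.
 */
static inline void hpsa_example_swizzle_abort_tag(void)
{
	u8 tag[8] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };

	swizzle_abort_tag(tag);	/* tag[] is now 03 02 01 00 07 06 05 04 */
}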
5611 
5612 static void hpsa_get_tag(struct ctlr_info *h,
5613 	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
5614 {
5615 	u64 tag;
5616 	if (c->cmd_type == CMD_IOACCEL1) {
5617 		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5618 			&h->ioaccel_cmd_pool[c->cmdindex];
5619 		tag = le64_to_cpu(cm1->tag);
5620 		*tagupper = cpu_to_le32(tag >> 32);
5621 		*taglower = cpu_to_le32(tag);
5622 		return;
5623 	}
5624 	if (c->cmd_type == CMD_IOACCEL2) {
5625 		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5626 			&h->ioaccel2_cmd_pool[c->cmdindex];
5627 		/* upper tag not used in ioaccel2 mode */
5628 		memset(tagupper, 0, sizeof(*tagupper));
5629 		*taglower = cm2->Tag;
5630 		return;
5631 	}
5632 	tag = le64_to_cpu(c->Header.tag);
5633 	*tagupper = cpu_to_le32(tag >> 32);
5634 	*taglower = cpu_to_le32(tag);
5635 }
5636 
5637 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
5638 	struct CommandList *abort, int reply_queue)
5639 {
5640 	int rc = IO_OK;
5641 	struct CommandList *c;
5642 	struct ErrorInfo *ei;
5643 	__le32 tagupper, taglower;
5644 
5645 	c = cmd_alloc(h);
5646 
5647 	/* fill_cmd can't fail here, no buffer to map */
5648 	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
5649 		0, 0, scsi3addr, TYPE_MSG);
5650 	if (h->needs_abort_tags_swizzled)
5651 		swizzle_abort_tag(&c->Request.CDB[4]);
5652 	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5653 	hpsa_get_tag(h, abort, &taglower, &tagupper);
5654 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
5655 		__func__, tagupper, taglower);
5656 	/* no unmap needed here because no data xfer. */
5657 
5658 	ei = c->err_info;
5659 	switch (ei->CommandStatus) {
5660 	case CMD_SUCCESS:
5661 		break;
5662 	case CMD_TMF_STATUS:
5663 		rc = hpsa_evaluate_tmf_status(h, c);
5664 		break;
5665 	case CMD_UNABORTABLE: /* Very common, don't make noise. */
5666 		rc = -1;
5667 		break;
5668 	default:
5669 		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
5670 			__func__, tagupper, taglower);
5671 		hpsa_scsi_interpret_error(h, c);
5672 		rc = -1;
5673 		break;
5674 	}
5675 	cmd_free(h, c);
5676 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5677 		__func__, tagupper, taglower);
5678 	return rc;
5679 }
5680 
5681 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5682 	struct CommandList *command_to_abort, int reply_queue)
5683 {
5684 	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5685 	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5686 	struct io_accel2_cmd *c2a =
5687 		&h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5688 	struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5689 	struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5690 
5691 	/*
5692 	 * We're overlaying struct hpsa_tmf_struct on top of something which
5693 	 * was allocated as a struct io_accel2_cmd, so we better be sure it
5694 	 * actually fits, and doesn't overrun the error info space.
5695 	 */
5696 	BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5697 			sizeof(struct io_accel2_cmd));
5698 	BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5699 			offsetof(struct hpsa_tmf_struct, error_len) +
5700 				sizeof(ac->error_len));
5701 
5702 	c->cmd_type = IOACCEL2_TMF;
5703 	c->scsi_cmd = SCSI_CMD_BUSY;
5704 
5705 	/* Adjust the DMA address to point to the accelerated command buffer */
5706 	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5707 				(c->cmdindex * sizeof(struct io_accel2_cmd));
5708 	BUG_ON(c->busaddr & 0x0000007F);
5709 
5710 	memset(ac, 0, sizeof(*c2)); /* yes this is correct */
5711 	ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5712 	ac->reply_queue = reply_queue;
5713 	ac->tmf = IOACCEL2_TMF_ABORT;
5714 	ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5715 	memset(ac->lun_id, 0, sizeof(ac->lun_id));
5716 	ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5717 	ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5718 	ac->error_ptr = cpu_to_le64(c->busaddr +
5719 			offsetof(struct io_accel2_cmd, error_data));
5720 	ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5721 }
5722 
5723 /* ioaccel2 path firmware cannot handle abort task requests.
5724  * Change abort requests to physical target reset, and send to the
5725  * address of the physical disk used for the ioaccel 2 command.
5726  * Return 0 on success (IO_OK)
5727  *	 -1 on failure
5728  */
5729 
5730 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
5731 	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5732 {
5733 	int rc = IO_OK;
5734 	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5735 	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5736 	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
5737 	unsigned char *psa = &phys_scsi3addr[0];
5738 
5739 	/* Get a pointer to the hpsa logical device. */
5740 	scmd = abort->scsi_cmd;
5741 	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5742 	if (dev == NULL) {
5743 		dev_warn(&h->pdev->dev,
5744 			"Cannot abort: no device pointer for command.\n");
5745 		return -1; /* not abortable */
5746 	}
5747 
5748 	if (h->raid_offload_debug > 0)
5749 		dev_info(&h->pdev->dev,
5750 			"scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5751 			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
5752 			"Reset as abort",
5753 			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5754 			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5755 
5756 	if (!dev->offload_enabled) {
5757 		dev_warn(&h->pdev->dev,
5758 			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5759 		return -1; /* not abortable */
5760 	}
5761 
5762 	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
5763 	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5764 		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5765 		return -1; /* not abortable */
5766 	}
5767 
5768 	/* send the reset */
5769 	if (h->raid_offload_debug > 0)
5770 		dev_info(&h->pdev->dev,
5771 			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5772 			psa[0], psa[1], psa[2], psa[3],
5773 			psa[4], psa[5], psa[6], psa[7]);
5774 	rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
5775 	if (rc != 0) {
5776 		dev_warn(&h->pdev->dev,
5777 			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5778 			psa[0], psa[1], psa[2], psa[3],
5779 			psa[4], psa[5], psa[6], psa[7]);
5780 		return rc; /* failed to reset */
5781 	}
5782 
5783 	/* wait for device to recover */
5784 	if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
5785 		dev_warn(&h->pdev->dev,
5786 			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5787 			psa[0], psa[1], psa[2], psa[3],
5788 			psa[4], psa[5], psa[6], psa[7]);
5789 		return -1;  /* failed to recover */
5790 	}
5791 
5792 	/* device recovered */
5793 	dev_info(&h->pdev->dev,
5794 		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5795 		psa[0], psa[1], psa[2], psa[3],
5796 		psa[4], psa[5], psa[6], psa[7]);
5797 
5798 	return rc; /* success */
5799 }
5800 
5801 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5802 	struct CommandList *abort, int reply_queue)
5803 {
5804 	int rc = IO_OK;
5805 	struct CommandList *c;
5806 	__le32 taglower, tagupper;
5807 	struct hpsa_scsi_dev_t *dev;
5808 	struct io_accel2_cmd *c2;
5809 
5810 	dev = abort->scsi_cmd->device->hostdata;
5811 	if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5812 		return -1;
5813 
5814 	c = cmd_alloc(h);
5815 	setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5816 	c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5817 	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5818 	hpsa_get_tag(h, abort, &taglower, &tagupper);
5819 	dev_dbg(&h->pdev->dev,
5820 		"%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5821 		__func__, tagupper, taglower);
5822 	/* no unmap needed here because no data xfer. */
5823 
5824 	dev_dbg(&h->pdev->dev,
5825 		"%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5826 		__func__, tagupper, taglower, c2->error_data.serv_response);
5827 	switch (c2->error_data.serv_response) {
5828 	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5829 	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5830 		rc = 0;
5831 		break;
5832 	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5833 	case IOACCEL2_SERV_RESPONSE_FAILURE:
5834 	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5835 		rc = -1;
5836 		break;
5837 	default:
5838 		dev_warn(&h->pdev->dev,
5839 			"%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5840 			__func__, tagupper, taglower,
5841 			c2->error_data.serv_response);
5842 		rc = -1;
5843 	}
5844 	cmd_free(h, c);
5845 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5846 		tagupper, taglower);
5847 	return rc;
5848 }
5849 
5850 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
5851 	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5852 {
5853 	/*
5854 	 * I/O accelerator mode 2 commands should be aborted via the
5855 	 * accelerated path, since RAID path is unaware of these commands,
5856 	 * but not all underlying firmware can handle abort TMF.
5857 	 * Change abort to physical device reset when abort TMF is unsupported.
5858 	 */
5859 	if (abort->cmd_type == CMD_IOACCEL2) {
5860 		if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
5861 			return hpsa_send_abort_ioaccel2(h, abort,
5862 						reply_queue);
5863 		else
5864 			return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
5865 							abort, reply_queue);
5866 	}
5867 	return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
5868 }
5869 
5870 /* Find out which reply queue a command was meant to return on */
5871 static int hpsa_extract_reply_queue(struct ctlr_info *h,
5872 					struct CommandList *c)
5873 {
5874 	if (c->cmd_type == CMD_IOACCEL2)
5875 		return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5876 	return c->Header.ReplyQueue;
5877 }
5878 
5879 /*
5880  * Limit concurrency of abort commands to prevent
5881  * over-subscription of commands
5882  */
5883 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5884 {
5885 #define ABORT_CMD_WAIT_MSECS 5000
5886 	return !wait_event_timeout(h->abort_cmd_wait_queue,
5887 			atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5888 			msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
5889 }
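
/*
 * Usage note: wait_for_available_abort_cmd() returns non-zero only when no
 * abort slot frees up within ABORT_CMD_WAIT_MSECS; on success the caller
 * owns one slot and must hand it back with
 * atomic_inc(&h->abort_cmds_available) plus a wake_up_all() on
 * abort_cmd_wait_queue, as hpsa_eh_abort_handler() does below.
 */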
5890 
5891 /* Send an abort for the specified command.
5892  *	If the device and controller support it,
5893  *		send a task abort request.
5894  */
5895 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5896 {
5897 
5898 	int rc;
5899 	struct ctlr_info *h;
5900 	struct hpsa_scsi_dev_t *dev;
5901 	struct CommandList *abort; /* pointer to command to be aborted */
5902 	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
5903 	char msg[256];		/* For debug messaging. */
5904 	int ml = 0;
5905 	__le32 tagupper, taglower;
5906 	int refcount, reply_queue;
5907 
5908 	if (sc == NULL)
5909 		return FAILED;
5910 
5911 	if (sc->device == NULL)
5912 		return FAILED;
5913 
5914 	/* Find the controller of the command to be aborted */
5915 	h = sdev_to_hba(sc->device);
5916 	if (h == NULL)
5917 		return FAILED;
5918 
5919 	/* Find the device of the command to be aborted */
5920 	dev = sc->device->hostdata;
5921 	if (!dev) {
5922 		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
5923 				__func__);
5924 		return FAILED;
5925 	}
5926 
5927 	/* If controller locked up, we can guarantee command won't complete */
5928 	if (lockup_detected(h)) {
5929 		hpsa_show_dev_msg(KERN_WARNING, h, dev,
5930 					"ABORT FAILED, lockup detected");
5931 		return FAILED;
5932 	}
5933 
5934 	/* This is a good time to check if controller lockup has occurred */
5935 	if (detect_controller_lockup(h)) {
5936 		hpsa_show_dev_msg(KERN_WARNING, h, dev,
5937 					"ABORT FAILED, new lockup detected");
5938 		return FAILED;
5939 	}
5940 
5941 	/* Check that controller supports some kind of task abort */
5942 	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5943 		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5944 		return FAILED;
5945 
5946 	memset(msg, 0, sizeof(msg));
5947 	ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
5948 		h->scsi_host->host_no, sc->device->channel,
5949 		sc->device->id, sc->device->lun,
5950 		"Aborting command", sc);
5951 
5952 	/* Get SCSI command to be aborted */
5953 	abort = (struct CommandList *) sc->host_scribble;
5954 	if (abort == NULL) {
5955 		/* This can happen if the command already completed. */
5956 		return SUCCESS;
5957 	}
5958 	refcount = atomic_inc_return(&abort->refcount);
5959 	if (refcount == 1) { /* Command is done already. */
5960 		cmd_free(h, abort);
5961 		return SUCCESS;
5962 	}
5963 
5964 	/* Don't bother trying the abort if we know it won't work. */
5965 	if (abort->cmd_type != CMD_IOACCEL2 &&
5966 		abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5967 		cmd_free(h, abort);
5968 		return FAILED;
5969 	}
5970 
5971 	/*
5972 	 * Check that we're aborting the right command.
5973 	 * It's possible the CommandList already completed and got re-used.
5974 	 */
5975 	if (abort->scsi_cmd != sc) {
5976 		cmd_free(h, abort);
5977 		return SUCCESS;
5978 	}
5979 
5980 	abort->abort_pending = true;
5981 	hpsa_get_tag(h, abort, &taglower, &tagupper);
5982 	reply_queue = hpsa_extract_reply_queue(h, abort);
5983 	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
5984 	as  = abort->scsi_cmd;
5985 	if (as != NULL)
5986 		ml += sprintf(msg+ml,
5987 			"CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
5988 			as->cmd_len, as->cmnd[0], as->cmnd[1],
5989 			as->serial_number);
5990 	dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
5991 	hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
5992 
5993 	/*
5994 	 * Command is in flight, or possibly already completed
5995 	 * by the firmware (but not to the scsi mid layer) but we can't
5996 	 * distinguish which.  Send the abort down.
5997 	 */
5998 	if (wait_for_available_abort_cmd(h)) {
5999 		dev_warn(&h->pdev->dev,
6000 			"%s FAILED, timeout waiting for an abort command to become available.\n",
6001 			msg);
6002 		cmd_free(h, abort);
6003 		return FAILED;
6004 	}
6005 	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
6006 	atomic_inc(&h->abort_cmds_available);
6007 	wake_up_all(&h->abort_cmd_wait_queue);
6008 	if (rc != 0) {
6009 		dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
6010 		hpsa_show_dev_msg(KERN_WARNING, h, dev,
6011 				"FAILED to abort command");
6012 		cmd_free(h, abort);
6013 		return FAILED;
6014 	}
6015 	dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
6016 	wait_event(h->event_sync_wait_queue,
6017 		   abort->scsi_cmd != sc || lockup_detected(h));
6018 	cmd_free(h, abort);
6019 	return !lockup_detected(h) ? SUCCESS : FAILED;
6020 }
6021 
6022 /*
6023  * For operations with an associated SCSI command, a command block is allocated
6024  * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
6025  * block request tag as an index into a table of entries.  cmd_tagged_free() is
6026  * the complement, although cmd_free() may be called instead.
6027  */
6028 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6029 					    struct scsi_cmnd *scmd)
6030 {
6031 	int idx = hpsa_get_cmd_index(scmd);
6032 	struct CommandList *c = h->cmd_pool + idx;
6033 
6034 	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6035 		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6036 			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6037 		/* The index value comes from the block layer, so if it's out of
6038 		 * bounds, it's probably not our bug.
6039 		 */
6040 		BUG();
6041 	}
6042 
6043 	atomic_inc(&c->refcount);
6044 	if (unlikely(!hpsa_is_cmd_idle(c))) {
6045 		/*
6046 		 * We expect that the SCSI layer will hand us a unique tag
6047 		 * value.  Thus, there should never be a collision here between
6048 		 * two requests...because if the selected command isn't idle
6049 		 * then someone is going to be very disappointed.
6050 		 */
6051 		dev_err(&h->pdev->dev,
6052 			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
6053 			idx);
6054 		if (c->scsi_cmd != NULL)
6055 			scsi_print_command(c->scsi_cmd);
6056 		scsi_print_command(scmd);
6057 	}
6058 
6059 	hpsa_cmd_partial_init(h, idx, c);
6060 	return c;
6061 }
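
/*
 * Lifecycle sketch: hpsa_scsi_queue_command() calls cmd_tagged_alloc(h, scmd)
 * using the block-layer tag as the index, submits the command, and the
 * completion or error path releases the block again via cmd_tagged_free()
 * (or cmd_free(), whose extra clear_bit is harmless for tagged blocks).
 */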
6062 
6063 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6064 {
6065 	/*
6066 	 * Release our reference to the block.  We don't need to do anything
6067 	 * else to free it, because it is accessed by index.  (There's no point
6068 	 * in checking the result of the decrement, since we cannot guarantee
6069 	 * that there isn't a concurrent abort which is also accessing it.)
6070 	 */
6071 	(void)atomic_dec(&c->refcount);
6072 }
6073 
6074 /*
6075  * For operations that cannot sleep, a command block is allocated at init,
6076  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
6077  * which ones are free or in use.  Lock must be held when calling this.
6078  * cmd_free() is the complement.
6079  * This function never gives up and returns NULL.  If it hangs,
6080  * another thread must call cmd_free() to free some tags.
6081  */
6082 
6083 static struct CommandList *cmd_alloc(struct ctlr_info *h)
6084 {
6085 	struct CommandList *c;
6086 	int refcount, i;
6087 	int offset = 0;
6088 
6089 	/*
6090 	 * There is some *extremely* small but non-zero chance that
6091 	 * multiple threads could get in here, and one thread could
6092 	 * be scanning through the list of bits looking for a free
6093 	 * one, but the free ones are always behind him, and other
6094 	 * threads sneak in behind him and eat them before he can
6095 	 * get to them, so that while there is always a free one, a
6096 	 * very unlucky thread might be starved anyway, never able to
6097 	 * beat the other threads.  In reality, this happens so
6098 	 * infrequently as to be indistinguishable from never.
6099 	 *
6100 	 * Note that we start allocating commands before the SCSI host structure
6101 	 * is initialized.  Since the search starts at bit zero, this
6102 	 * all works, since we have at least one command structure available;
6103 	 * however, it means that the structures with the low indexes have to be
6104 	 * reserved for driver-initiated requests, while requests from the block
6105 	 * layer will use the higher indexes.
6106 	 */
6107 
6108 	for (;;) {
6109 		i = find_next_zero_bit(h->cmd_pool_bits,
6110 					HPSA_NRESERVED_CMDS,
6111 					offset);
6112 		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6113 			offset = 0;
6114 			continue;
6115 		}
6116 		c = h->cmd_pool + i;
6117 		refcount = atomic_inc_return(&c->refcount);
6118 		if (unlikely(refcount > 1)) {
6119 			cmd_free(h, c); /* already in use */
6120 			offset = (i + 1) % HPSA_NRESERVED_CMDS;
6121 			continue;
6122 		}
6123 		set_bit(i & (BITS_PER_LONG - 1),
6124 			h->cmd_pool_bits + (i / BITS_PER_LONG));
6125 		break; /* it's ours now. */
6126 	}
6127 	hpsa_cmd_partial_init(h, i, c);
6128 	return c;
6129 }
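
/*
 * Example of the reserved-pool search above (hypothetical state): if bits
 * 0..4 of cmd_pool_bits are already set, find_next_zero_bit() returns 5;
 * should a racing thread have bumped block 5's refcount first, the loop
 * drops the reference via cmd_free() and restarts the search at offset
 * (5 + 1) % HPSA_NRESERVED_CMDS.
 */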
6130 
6131 /*
6132  * This is the complementary operation to cmd_alloc().  Note, however, in some
6133  * corner cases it may also be used to free blocks allocated by
6134  * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
6135  * the clear-bit is harmless.
6136  */
6137 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6138 {
6139 	if (atomic_dec_and_test(&c->refcount)) {
6140 		int i;
6141 
6142 		i = c - h->cmd_pool;
6143 		clear_bit(i & (BITS_PER_LONG - 1),
6144 			  h->cmd_pool_bits + (i / BITS_PER_LONG));
6145 	}
6146 }
6147 
6148 #ifdef CONFIG_COMPAT
6149 
6150 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
6151 	void __user *arg)
6152 {
6153 	IOCTL32_Command_struct __user *arg32 =
6154 	    (IOCTL32_Command_struct __user *) arg;
6155 	IOCTL_Command_struct arg64;
6156 	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6157 	int err;
6158 	u32 cp;
6159 
6160 	memset(&arg64, 0, sizeof(arg64));
6161 	err = 0;
6162 	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6163 			   sizeof(arg64.LUN_info));
6164 	err |= copy_from_user(&arg64.Request, &arg32->Request,
6165 			   sizeof(arg64.Request));
6166 	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6167 			   sizeof(arg64.error_info));
6168 	err |= get_user(arg64.buf_size, &arg32->buf_size);
6169 	err |= get_user(cp, &arg32->buf);
6170 	arg64.buf = compat_ptr(cp);
6171 	err |= copy_to_user(p, &arg64, sizeof(arg64));
6172 
6173 	if (err)
6174 		return -EFAULT;
6175 
6176 	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
6177 	if (err)
6178 		return err;
6179 	err |= copy_in_user(&arg32->error_info, &p->error_info,
6180 			 sizeof(arg32->error_info));
6181 	if (err)
6182 		return -EFAULT;
6183 	return err;
6184 }
6185 
6186 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6187 	int cmd, void __user *arg)
6188 {
6189 	BIG_IOCTL32_Command_struct __user *arg32 =
6190 	    (BIG_IOCTL32_Command_struct __user *) arg;
6191 	BIG_IOCTL_Command_struct arg64;
6192 	BIG_IOCTL_Command_struct __user *p =
6193 	    compat_alloc_user_space(sizeof(arg64));
6194 	int err;
6195 	u32 cp;
6196 
6197 	memset(&arg64, 0, sizeof(arg64));
6198 	err = 0;
6199 	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6200 			   sizeof(arg64.LUN_info));
6201 	err |= copy_from_user(&arg64.Request, &arg32->Request,
6202 			   sizeof(arg64.Request));
6203 	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6204 			   sizeof(arg64.error_info));
6205 	err |= get_user(arg64.buf_size, &arg32->buf_size);
6206 	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6207 	err |= get_user(cp, &arg32->buf);
6208 	arg64.buf = compat_ptr(cp);
6209 	err |= copy_to_user(p, &arg64, sizeof(arg64));
6210 
6211 	if (err)
6212 		return -EFAULT;
6213 
6214 	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6215 	if (err)
6216 		return err;
6217 	err |= copy_in_user(&arg32->error_info, &p->error_info,
6218 			 sizeof(arg32->error_info));
6219 	if (err)
6220 		return -EFAULT;
6221 	return err;
6222 }
6223 
6224 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6225 {
6226 	switch (cmd) {
6227 	case CCISS_GETPCIINFO:
6228 	case CCISS_GETINTINFO:
6229 	case CCISS_SETINTINFO:
6230 	case CCISS_GETNODENAME:
6231 	case CCISS_SETNODENAME:
6232 	case CCISS_GETHEARTBEAT:
6233 	case CCISS_GETBUSTYPES:
6234 	case CCISS_GETFIRMVER:
6235 	case CCISS_GETDRIVVER:
6236 	case CCISS_REVALIDVOLS:
6237 	case CCISS_DEREGDISK:
6238 	case CCISS_REGNEWDISK:
6239 	case CCISS_REGNEWD:
6240 	case CCISS_RESCANDISK:
6241 	case CCISS_GETLUNINFO:
6242 		return hpsa_ioctl(dev, cmd, arg);
6243 
6244 	case CCISS_PASSTHRU32:
6245 		return hpsa_ioctl32_passthru(dev, cmd, arg);
6246 	case CCISS_BIG_PASSTHRU32:
6247 		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6248 
6249 	default:
6250 		return -ENOIOCTLCMD;
6251 	}
6252 }
6253 #endif
6254 
6255 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6256 {
6257 	struct hpsa_pci_info pciinfo;
6258 
6259 	if (!argp)
6260 		return -EINVAL;
6261 	pciinfo.domain = pci_domain_nr(h->pdev->bus);
6262 	pciinfo.bus = h->pdev->bus->number;
6263 	pciinfo.dev_fn = h->pdev->devfn;
6264 	pciinfo.board_id = h->board_id;
6265 	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6266 		return -EFAULT;
6267 	return 0;
6268 }
6269 
6270 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6271 {
6272 	DriverVer_type DriverVer;
6273 	unsigned char vmaj, vmin, vsubmin;
6274 	int rc;
6275 
6276 	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6277 		&vmaj, &vmin, &vsubmin);
6278 	if (rc != 3) {
6279 		dev_info(&h->pdev->dev, "driver version string '%s' "
6280 			"unrecognized.", HPSA_DRIVER_VERSION);
6281 		vmaj = 0;
6282 		vmin = 0;
6283 		vsubmin = 0;
6284 	}
6285 	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6286 	if (!argp)
6287 		return -EINVAL;
6288 	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6289 		return -EFAULT;
6290 	return 0;
6291 }
6292 
6293 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6294 {
6295 	IOCTL_Command_struct iocommand;
6296 	struct CommandList *c;
6297 	char *buff = NULL;
6298 	u64 temp64;
6299 	int rc = 0;
6300 
6301 	if (!argp)
6302 		return -EINVAL;
6303 	if (!capable(CAP_SYS_RAWIO))
6304 		return -EPERM;
6305 	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6306 		return -EFAULT;
6307 	if ((iocommand.buf_size < 1) &&
6308 	    (iocommand.Request.Type.Direction != XFER_NONE)) {
6309 		return -EINVAL;
6310 	}
6311 	if (iocommand.buf_size > 0) {
6312 		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6313 		if (buff == NULL)
6314 			return -ENOMEM;
6315 		if (iocommand.Request.Type.Direction & XFER_WRITE) {
6316 			/* Copy the data into the buffer we created */
6317 			if (copy_from_user(buff, iocommand.buf,
6318 				iocommand.buf_size)) {
6319 				rc = -EFAULT;
6320 				goto out_kfree;
6321 			}
6322 		} else {
6323 			memset(buff, 0, iocommand.buf_size);
6324 		}
6325 	}
6326 	c = cmd_alloc(h);
6327 
6328 	/* Fill in the command type */
6329 	c->cmd_type = CMD_IOCTL_PEND;
6330 	c->scsi_cmd = SCSI_CMD_BUSY;
6331 	/* Fill in Command Header */
6332 	c->Header.ReplyQueue = 0; /* unused in simple mode */
6333 	if (iocommand.buf_size > 0) {	/* buffer to fill */
6334 		c->Header.SGList = 1;
6335 		c->Header.SGTotal = cpu_to_le16(1);
6336 	} else	{ /* no buffers to fill */
6337 		c->Header.SGList = 0;
6338 		c->Header.SGTotal = cpu_to_le16(0);
6339 	}
6340 	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6341 
6342 	/* Fill in Request block */
6343 	memcpy(&c->Request, &iocommand.Request,
6344 		sizeof(c->Request));
6345 
6346 	/* Fill in the scatter gather information */
6347 	if (iocommand.buf_size > 0) {
6348 		temp64 = pci_map_single(h->pdev, buff,
6349 			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6350 		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6351 			c->SG[0].Addr = cpu_to_le64(0);
6352 			c->SG[0].Len = cpu_to_le32(0);
6353 			rc = -ENOMEM;
6354 			goto out;
6355 		}
6356 		c->SG[0].Addr = cpu_to_le64(temp64);
6357 		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6358 		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6359 	}
6360 	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6361 	if (iocommand.buf_size > 0)
6362 		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
6363 	check_ioctl_unit_attention(h, c);
6364 	if (rc) {
6365 		rc = -EIO;
6366 		goto out;
6367 	}
6368 
6369 	/* Copy the error information out */
6370 	memcpy(&iocommand.error_info, c->err_info,
6371 		sizeof(iocommand.error_info));
6372 	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6373 		rc = -EFAULT;
6374 		goto out;
6375 	}
6376 	if ((iocommand.Request.Type.Direction & XFER_READ) &&
6377 		iocommand.buf_size > 0) {
6378 		/* Copy the data out of the buffer we created */
6379 		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6380 			rc = -EFAULT;
6381 			goto out;
6382 		}
6383 	}
6384 out:
6385 	cmd_free(h, c);
6386 out_kfree:
6387 	kfree(buff);
6388 	return rc;
6389 }
6390 
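/* CCISS_BIG_PASSTHRU: like hpsa_passthru_ioctl(), but the user buffer may
 * be split across up to SG_ENTRIES_IN_CMD scatter-gather segments of at
 * most ioc->malloc_size bytes each.
 */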
6391 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6392 {
6393 	BIG_IOCTL_Command_struct *ioc;
6394 	struct CommandList *c;
6395 	unsigned char **buff = NULL;
6396 	int *buff_size = NULL;
6397 	u64 temp64;
6398 	BYTE sg_used = 0;
6399 	int status = 0;
6400 	u32 left;
6401 	u32 sz;
6402 	BYTE __user *data_ptr;
6403 
6404 	if (!argp)
6405 		return -EINVAL;
6406 	if (!capable(CAP_SYS_RAWIO))
6407 		return -EPERM;
6408 	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6410 	if (!ioc) {
6411 		status = -ENOMEM;
6412 		goto cleanup1;
6413 	}
6414 	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6415 		status = -EFAULT;
6416 		goto cleanup1;
6417 	}
6418 	if ((ioc->buf_size < 1) &&
6419 	    (ioc->Request.Type.Direction != XFER_NONE)) {
6420 		status = -EINVAL;
6421 		goto cleanup1;
6422 	}
6423 	/* Check kmalloc limits  using all SGs */
6424 	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6425 		status = -EINVAL;
6426 		goto cleanup1;
6427 	}
6428 	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6429 		status = -EINVAL;
6430 		goto cleanup1;
6431 	}
6432 	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
6433 	if (!buff) {
6434 		status = -ENOMEM;
6435 		goto cleanup1;
6436 	}
6437 	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
6438 	if (!buff_size) {
6439 		status = -ENOMEM;
6440 		goto cleanup1;
6441 	}
6442 	left = ioc->buf_size;
6443 	data_ptr = ioc->buf;
6444 	while (left) {
6445 		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6446 		buff_size[sg_used] = sz;
6447 		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6448 		if (buff[sg_used] == NULL) {
6449 			status = -ENOMEM;
6450 			goto cleanup1;
6451 		}
6452 		if (ioc->Request.Type.Direction & XFER_WRITE) {
6453 			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6454 				status = -EFAULT;
6455 				goto cleanup1;
6456 			}
6457 		} else
6458 			memset(buff[sg_used], 0, sz);
6459 		left -= sz;
6460 		data_ptr += sz;
6461 		sg_used++;
6462 	}
6463 	c = cmd_alloc(h);
6464 
6465 	c->cmd_type = CMD_IOCTL_PEND;
6466 	c->scsi_cmd = SCSI_CMD_BUSY;
6467 	c->Header.ReplyQueue = 0;
6468 	c->Header.SGList = (u8) sg_used;
6469 	c->Header.SGTotal = cpu_to_le16(sg_used);
6470 	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6471 	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6472 	if (ioc->buf_size > 0) {
6473 		int i;
6474 		for (i = 0; i < sg_used; i++) {
6475 			temp64 = pci_map_single(h->pdev, buff[i],
6476 				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
6477 			if (dma_mapping_error(&h->pdev->dev,
6478 							(dma_addr_t) temp64)) {
6479 				c->SG[i].Addr = cpu_to_le64(0);
6480 				c->SG[i].Len = cpu_to_le32(0);
6481 				hpsa_pci_unmap(h->pdev, c, i,
6482 					PCI_DMA_BIDIRECTIONAL);
6483 				status = -ENOMEM;
6484 				goto cleanup0;
6485 			}
6486 			c->SG[i].Addr = cpu_to_le64(temp64);
6487 			c->SG[i].Len = cpu_to_le32(buff_size[i]);
6488 			c->SG[i].Ext = cpu_to_le32(0);
6489 		}
6490 		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6491 	}
6492 	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6493 	if (sg_used)
6494 		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
6495 	check_ioctl_unit_attention(h, c);
6496 	if (status) {
6497 		status = -EIO;
6498 		goto cleanup0;
6499 	}
6500 
6501 	/* Copy the error information out */
6502 	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6503 	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6504 		status = -EFAULT;
6505 		goto cleanup0;
6506 	}
6507 	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6508 		int i;
6509 
6510 		/* Copy the data out of the buffer we created */
6511 		BYTE __user *ptr = ioc->buf;
6512 		for (i = 0; i < sg_used; i++) {
6513 			if (copy_to_user(ptr, buff[i], buff_size[i])) {
6514 				status = -EFAULT;
6515 				goto cleanup0;
6516 			}
6517 			ptr += buff_size[i];
6518 		}
6519 	}
6520 	status = 0;
6521 cleanup0:
6522 	cmd_free(h, c);
6523 cleanup1:
6524 	if (buff) {
6525 		int i;
6526 
6527 		for (i = 0; i < sg_used; i++)
6528 			kfree(buff[i]);
6529 		kfree(buff);
6530 	}
6531 	kfree(buff_size);
6532 	kfree(ioc);
6533 	return status;
6534 }
6535 
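/* If an ioctl-submitted command finished with a target status, check
 * whether a unit attention condition needs to be handled.
 */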
6536 static void check_ioctl_unit_attention(struct ctlr_info *h,
6537 	struct CommandList *c)
6538 {
6539 	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6540 			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6541 		(void) check_for_unit_attention(h, c);
6542 }
6543 
6544 /*
6545  * ioctl
6546  */
6547 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6548 {
6549 	struct ctlr_info *h;
6550 	void __user *argp = (void __user *)arg;
6551 	int rc;
6552 
6553 	h = sdev_to_hba(dev);
6554 
6555 	switch (cmd) {
6556 	case CCISS_DEREGDISK:
6557 	case CCISS_REGNEWDISK:
6558 	case CCISS_REGNEWD:
6559 		hpsa_scan_start(h->scsi_host);
6560 		return 0;
6561 	case CCISS_GETPCIINFO:
6562 		return hpsa_getpciinfo_ioctl(h, argp);
6563 	case CCISS_GETDRIVVER:
6564 		return hpsa_getdrivver_ioctl(h, argp);
6565 	case CCISS_PASSTHRU:
6566 		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6567 			return -EAGAIN;
6568 		rc = hpsa_passthru_ioctl(h, argp);
6569 		atomic_inc(&h->passthru_cmds_avail);
6570 		return rc;
6571 	case CCISS_BIG_PASSTHRU:
6572 		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6573 			return -EAGAIN;
6574 		rc = hpsa_big_passthru_ioctl(h, argp);
6575 		atomic_inc(&h->passthru_cmds_avail);
6576 		return rc;
6577 	default:
6578 		return -ENOTTY;
6579 	}
6580 }
6581 
6582 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6583 				u8 reset_type)
6584 {
6585 	struct CommandList *c;
6586 
6587 	c = cmd_alloc(h);
6588 
6589 	/* fill_cmd can't fail here, no data buffer to map */
6590 	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6591 		RAID_CTLR_LUNID, TYPE_MSG);
6592 	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6593 	c->waiting = NULL;
6594 	enqueue_cmd_and_start_io(h, c);
6595 	/* Don't wait for completion, the reset won't complete.  Don't free
6596 	 * the command either.  This is the last command we will send before
6597 	 * re-initializing everything, so it doesn't matter and won't leak.
6598 	 */
6599 	return;
6600 }
6601 
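/* Build the header, request block and CDB for an internally generated
 * command (TYPE_CMD) or controller message (TYPE_MSG), then DMA-map the
 * optional data buffer.  Returns 0 on success, -1 if the mapping fails.
 */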
6602 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6603 	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6604 	int cmd_type)
6605 {
6606 	int pci_dir = XFER_NONE;
6607 	u64 tag; /* for commands to be aborted */
6608 
6609 	c->cmd_type = CMD_IOCTL_PEND;
6610 	c->scsi_cmd = SCSI_CMD_BUSY;
6611 	c->Header.ReplyQueue = 0;
6612 	if (buff != NULL && size > 0) {
6613 		c->Header.SGList = 1;
6614 		c->Header.SGTotal = cpu_to_le16(1);
6615 	} else {
6616 		c->Header.SGList = 0;
6617 		c->Header.SGTotal = cpu_to_le16(0);
6618 	}
6619 	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6620 
6621 	if (cmd_type == TYPE_CMD) {
6622 		switch (cmd) {
6623 		case HPSA_INQUIRY:
6624 			/* are we trying to read a vital product page */
6625 			if (page_code & VPD_PAGE) {
6626 				c->Request.CDB[1] = 0x01;
6627 				c->Request.CDB[2] = (page_code & 0xff);
6628 			}
6629 			c->Request.CDBLen = 6;
6630 			c->Request.type_attr_dir =
6631 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6632 			c->Request.Timeout = 0;
6633 			c->Request.CDB[0] = HPSA_INQUIRY;
6634 			c->Request.CDB[4] = size & 0xFF;
6635 			break;
6636 		case HPSA_REPORT_LOG:
6637 		case HPSA_REPORT_PHYS:
6638 			/* Talking to the controller, so it's a physical command.
6639 			   Mode = 00, target = 0.  Nothing to write.
6640 			 */
6641 			c->Request.CDBLen = 12;
6642 			c->Request.type_attr_dir =
6643 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6644 			c->Request.Timeout = 0;
6645 			c->Request.CDB[0] = cmd;
6646 			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6647 			c->Request.CDB[7] = (size >> 16) & 0xFF;
6648 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6649 			c->Request.CDB[9] = size & 0xFF;
6650 			break;
6651 		case BMIC_SENSE_DIAG_OPTIONS:
6652 			c->Request.CDBLen = 16;
6653 			c->Request.type_attr_dir =
6654 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6655 			c->Request.Timeout = 0;
6656 			/* Spec says this should be BMIC_WRITE */
6657 			c->Request.CDB[0] = BMIC_READ;
6658 			c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6659 			break;
6660 		case BMIC_SET_DIAG_OPTIONS:
6661 			c->Request.CDBLen = 16;
6662 			c->Request.type_attr_dir =
6663 					TYPE_ATTR_DIR(cmd_type,
6664 						ATTR_SIMPLE, XFER_WRITE);
6665 			c->Request.Timeout = 0;
6666 			c->Request.CDB[0] = BMIC_WRITE;
6667 			c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6668 			break;
6669 		case HPSA_CACHE_FLUSH:
6670 			c->Request.CDBLen = 12;
6671 			c->Request.type_attr_dir =
6672 					TYPE_ATTR_DIR(cmd_type,
6673 						ATTR_SIMPLE, XFER_WRITE);
6674 			c->Request.Timeout = 0;
6675 			c->Request.CDB[0] = BMIC_WRITE;
6676 			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6677 			c->Request.CDB[7] = (size >> 8) & 0xFF;
6678 			c->Request.CDB[8] = size & 0xFF;
6679 			break;
6680 		case TEST_UNIT_READY:
6681 			c->Request.CDBLen = 6;
6682 			c->Request.type_attr_dir =
6683 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6684 			c->Request.Timeout = 0;
6685 			break;
6686 		case HPSA_GET_RAID_MAP:
6687 			c->Request.CDBLen = 12;
6688 			c->Request.type_attr_dir =
6689 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6690 			c->Request.Timeout = 0;
6691 			c->Request.CDB[0] = HPSA_CISS_READ;
6692 			c->Request.CDB[1] = cmd;
6693 			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6694 			c->Request.CDB[7] = (size >> 16) & 0xFF;
6695 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6696 			c->Request.CDB[9] = size & 0xFF;
6697 			break;
6698 		case BMIC_SENSE_CONTROLLER_PARAMETERS:
6699 			c->Request.CDBLen = 10;
6700 			c->Request.type_attr_dir =
6701 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6702 			c->Request.Timeout = 0;
6703 			c->Request.CDB[0] = BMIC_READ;
6704 			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6705 			c->Request.CDB[7] = (size >> 16) & 0xFF;
6706 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6707 			break;
6708 		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6709 			c->Request.CDBLen = 10;
6710 			c->Request.type_attr_dir =
6711 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6712 			c->Request.Timeout = 0;
6713 			c->Request.CDB[0] = BMIC_READ;
6714 			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6715 			c->Request.CDB[7] = (size >> 16) & 0xFF;
6716 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6717 			break;
6718 		case BMIC_SENSE_SUBSYSTEM_INFORMATION:
6719 			c->Request.CDBLen = 10;
6720 			c->Request.type_attr_dir =
6721 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6722 			c->Request.Timeout = 0;
6723 			c->Request.CDB[0] = BMIC_READ;
6724 			c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
6725 			c->Request.CDB[7] = (size >> 16) & 0xFF;
6726 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6727 			break;
6728 		case BMIC_IDENTIFY_CONTROLLER:
6729 			c->Request.CDBLen = 10;
6730 			c->Request.type_attr_dir =
6731 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6732 			c->Request.Timeout = 0;
6733 			c->Request.CDB[0] = BMIC_READ;
6734 			c->Request.CDB[1] = 0;
6735 			c->Request.CDB[2] = 0;
6736 			c->Request.CDB[3] = 0;
6737 			c->Request.CDB[4] = 0;
6738 			c->Request.CDB[5] = 0;
6739 			c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
6740 			c->Request.CDB[7] = (size >> 16) & 0xFF;
6741 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6742 			c->Request.CDB[9] = 0;
6743 			break;
6744 		default:
6745 			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6746 			BUG();
6747 			return -1;
6748 		}
6749 	} else if (cmd_type == TYPE_MSG) {
6750 		switch (cmd) {
6751 
6752 		case  HPSA_PHYS_TARGET_RESET:
6753 			c->Request.CDBLen = 16;
6754 			c->Request.type_attr_dir =
6755 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6756 			c->Request.Timeout = 0; /* Don't time out */
6757 			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6758 			c->Request.CDB[0] = HPSA_RESET;
6759 			c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6760 			/* Physical target reset needs no control bytes 4-7*/
6761 			c->Request.CDB[4] = 0x00;
6762 			c->Request.CDB[5] = 0x00;
6763 			c->Request.CDB[6] = 0x00;
6764 			c->Request.CDB[7] = 0x00;
6765 			break;
6766 		case  HPSA_DEVICE_RESET_MSG:
6767 			c->Request.CDBLen = 16;
6768 			c->Request.type_attr_dir =
6769 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6770 			c->Request.Timeout = 0; /* Don't time out */
6771 			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6772 			c->Request.CDB[0] =  cmd;
6773 			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6774 			/* If bytes 4-7 are zero, it means reset the */
6775 			/* LunID device */
6776 			c->Request.CDB[4] = 0x00;
6777 			c->Request.CDB[5] = 0x00;
6778 			c->Request.CDB[6] = 0x00;
6779 			c->Request.CDB[7] = 0x00;
6780 			break;
6781 		case  HPSA_ABORT_MSG:
6782 			memcpy(&tag, buff, sizeof(tag));
6783 			dev_dbg(&h->pdev->dev,
6784 				"Abort Tag:0x%016llx using rqst Tag:0x%016llx",
6785 				tag, c->Header.tag);
6786 			c->Request.CDBLen = 16;
6787 			c->Request.type_attr_dir =
6788 					TYPE_ATTR_DIR(cmd_type,
6789 						ATTR_SIMPLE, XFER_WRITE);
6790 			c->Request.Timeout = 0; /* Don't time out */
6791 			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6792 			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6793 			c->Request.CDB[2] = 0x00; /* reserved */
6794 			c->Request.CDB[3] = 0x00; /* reserved */
6795 			/* Tag to abort goes in CDB[4]-CDB[11] */
6796 			memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
6797 			c->Request.CDB[12] = 0x00; /* reserved */
6798 			c->Request.CDB[13] = 0x00; /* reserved */
6799 			c->Request.CDB[14] = 0x00; /* reserved */
6800 			c->Request.CDB[15] = 0x00; /* reserved */
6801 			break;
6802 		default:
6803 			dev_warn(&h->pdev->dev, "unknown message type %d\n",
6804 				cmd);
6805 			BUG();
6806 		}
6807 	} else {
6808 		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6809 		BUG();
6810 	}
6811 
6812 	switch (GET_DIR(c->Request.type_attr_dir)) {
6813 	case XFER_READ:
6814 		pci_dir = PCI_DMA_FROMDEVICE;
6815 		break;
6816 	case XFER_WRITE:
6817 		pci_dir = PCI_DMA_TODEVICE;
6818 		break;
6819 	case XFER_NONE:
6820 		pci_dir = PCI_DMA_NONE;
6821 		break;
6822 	default:
6823 		pci_dir = PCI_DMA_BIDIRECTIONAL;
6824 	}
6825 	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6826 		return -1;
6827 	return 0;
6828 }
6829 
6830 /*
6831  * Map (physical) PCI mem into (virtual) kernel space
6832  */
6833 static void __iomem *remap_pci_mem(ulong base, ulong size)
6834 {
6835 	ulong page_base = ((ulong) base) & PAGE_MASK;
6836 	ulong page_offs = ((ulong) base) - page_base;
6837 	void __iomem *page_remapped = ioremap_nocache(page_base,
6838 		page_offs + size);
6839 
6840 	return page_remapped ? (page_remapped + page_offs) : NULL;
6841 }
6842 
6843 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6844 {
6845 	return h->access.command_completed(h, q);
6846 }
6847 
6848 static inline bool interrupt_pending(struct ctlr_info *h)
6849 {
6850 	return h->access.intr_pending(h);
6851 }
6852 
6853 static inline long interrupt_not_for_us(struct ctlr_info *h)
6854 {
6855 	return (h->access.intr_pending(h) == 0) ||
6856 		(h->interrupts_enabled == 0);
6857 }
6858 
6859 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6860 	u32 raw_tag)
6861 {
6862 	if (unlikely(tag_index >= h->nr_cmds)) {
6863 		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6864 		return 1;
6865 	}
6866 	return 0;
6867 }
6868 
6869 static inline void finish_cmd(struct CommandList *c)
6870 {
6871 	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6872 	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6873 			|| c->cmd_type == CMD_IOACCEL2))
6874 		complete_scsi_command(c);
6875 	else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6876 		complete(c->waiting);
6877 }
6878 
6879 /* process completion of an indexed ("direct lookup") command */
6880 static inline void process_indexed_cmd(struct ctlr_info *h,
6881 	u32 raw_tag)
6882 {
6883 	u32 tag_index;
6884 	struct CommandList *c;
6885 
6886 	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6887 	if (!bad_tag(h, tag_index, raw_tag)) {
6888 		c = h->cmd_pool + tag_index;
6889 		finish_cmd(c);
6890 	}
6891 }
6892 
6893 /* Some controllers, like p400, will give us one interrupt
6894  * after a soft reset, even if we turned interrupts off.
6895  * Only need to check for this in the hpsa_xxx_discard_completions
6896  * functions.
6897  */
6898 static int ignore_bogus_interrupt(struct ctlr_info *h)
6899 {
6900 	if (likely(!reset_devices))
6901 		return 0;
6902 
6903 	if (likely(h->interrupts_enabled))
6904 		return 0;
6905 
6906 	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6907 		"(known firmware bug.)  Ignoring.\n");
6908 
6909 	return 1;
6910 }
6911 
6912 /*
6913  * Convert &h->q[x] (passed to interrupt handlers) back to h.
6914  * Relies on (h->q[x] == x) being true for x such that
6915  * 0 <= x < MAX_REPLY_QUEUES.
6916  */
6917 static struct ctlr_info *queue_to_hba(u8 *queue)
6918 {
6919 	return container_of((queue - *queue), struct ctlr_info, q[0]);
6920 }
6921 
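/* "Discard" interrupt handlers, installed around a soft reset: drain the
 * reply queue but throw the stale completions away.
 */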
6922 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6923 {
6924 	struct ctlr_info *h = queue_to_hba(queue);
6925 	u8 q = *(u8 *) queue;
6926 	u32 raw_tag;
6927 
6928 	if (ignore_bogus_interrupt(h))
6929 		return IRQ_NONE;
6930 
6931 	if (interrupt_not_for_us(h))
6932 		return IRQ_NONE;
6933 	h->last_intr_timestamp = get_jiffies_64();
6934 	while (interrupt_pending(h)) {
6935 		raw_tag = get_next_completion(h, q);
6936 		while (raw_tag != FIFO_EMPTY)
6937 			raw_tag = next_command(h, q);
6938 	}
6939 	return IRQ_HANDLED;
6940 }
6941 
6942 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6943 {
6944 	struct ctlr_info *h = queue_to_hba(queue);
6945 	u32 raw_tag;
6946 	u8 q = *(u8 *) queue;
6947 
6948 	if (ignore_bogus_interrupt(h))
6949 		return IRQ_NONE;
6950 
6951 	h->last_intr_timestamp = get_jiffies_64();
6952 	raw_tag = get_next_completion(h, q);
6953 	while (raw_tag != FIFO_EMPTY)
6954 		raw_tag = next_command(h, q);
6955 	return IRQ_HANDLED;
6956 }
6957 
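/* INTx interrupt handler: process completions for as long as the
 * controller keeps the interrupt asserted.
 */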
6958 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6959 {
6960 	struct ctlr_info *h = queue_to_hba((u8 *) queue);
6961 	u32 raw_tag;
6962 	u8 q = *(u8 *) queue;
6963 
6964 	if (interrupt_not_for_us(h))
6965 		return IRQ_NONE;
6966 	h->last_intr_timestamp = get_jiffies_64();
6967 	while (interrupt_pending(h)) {
6968 		raw_tag = get_next_completion(h, q);
6969 		while (raw_tag != FIFO_EMPTY) {
6970 			process_indexed_cmd(h, raw_tag);
6971 			raw_tag = next_command(h, q);
6972 		}
6973 	}
6974 	return IRQ_HANDLED;
6975 }
6976 
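/* MSI/MSI-X interrupt handler: the interrupt is ours by definition, so
 * just drain the reply queue for this vector.
 */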
6977 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
6978 {
6979 	struct ctlr_info *h = queue_to_hba(queue);
6980 	u32 raw_tag;
6981 	u8 q = *(u8 *) queue;
6982 
6983 	h->last_intr_timestamp = get_jiffies_64();
6984 	raw_tag = get_next_completion(h, q);
6985 	while (raw_tag != FIFO_EMPTY) {
6986 		process_indexed_cmd(h, raw_tag);
6987 		raw_tag = next_command(h, q);
6988 	}
6989 	return IRQ_HANDLED;
6990 }
6991 
6992 /* Send a message CDB to the firmware. Careful, this only works
6993  * in simple mode, not performant mode due to the tag lookup.
6994  * We only ever use this immediately after a controller reset.
6995  */
6996 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6997 			unsigned char type)
6998 {
6999 	struct Command {
7000 		struct CommandListHeader CommandHeader;
7001 		struct RequestBlock Request;
7002 		struct ErrDescriptor ErrorDescriptor;
7003 	};
7004 	struct Command *cmd;
7005 	static const size_t cmd_sz = sizeof(*cmd) +
7006 					sizeof(cmd->ErrorDescriptor);
7007 	dma_addr_t paddr64;
7008 	__le32 paddr32;
7009 	u32 tag;
7010 	void __iomem *vaddr;
7011 	int i, err;
7012 
7013 	vaddr = pci_ioremap_bar(pdev, 0);
7014 	if (vaddr == NULL)
7015 		return -ENOMEM;
7016 
7017 	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
7018 	 * CCISS commands, so they must be allocated from the lower 4GiB of
7019 	 * memory.
7020 	 */
7021 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
7022 	if (err) {
7023 		iounmap(vaddr);
7024 		return err;
7025 	}
7026 
7027 	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
7028 	if (cmd == NULL) {
7029 		iounmap(vaddr);
7030 		return -ENOMEM;
7031 	}
7032 
7033 	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
7034 	 * although there's no guarantee, we assume that the address is at
7035 	 * least 4-byte aligned (most likely, it's page-aligned).
7036 	 */
7037 	paddr32 = cpu_to_le32(paddr64);
7038 
7039 	cmd->CommandHeader.ReplyQueue = 0;
7040 	cmd->CommandHeader.SGList = 0;
7041 	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7042 	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7043 	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7044 
7045 	cmd->Request.CDBLen = 16;
7046 	cmd->Request.type_attr_dir =
7047 			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7048 	cmd->Request.Timeout = 0; /* Don't time out */
7049 	cmd->Request.CDB[0] = opcode;
7050 	cmd->Request.CDB[1] = type;
7051 	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
7052 	cmd->ErrorDescriptor.Addr =
7053 			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7054 	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7055 
7056 	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7057 
7058 	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7059 		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7060 		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7061 			break;
7062 		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7063 	}
7064 
7065 	iounmap(vaddr);
7066 
7067 	/* we leak the DMA buffer here ... no choice since the controller could
7068 	 *  still complete the command.
7069 	 */
7070 	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7071 		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7072 			opcode, type);
7073 		return -ETIMEDOUT;
7074 	}
7075 
7076 	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
7077 
7078 	if (tag & HPSA_ERROR_BIT) {
7079 		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7080 			opcode, type);
7081 		return -EIO;
7082 	}
7083 
7084 	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7085 		opcode, type);
7086 	return 0;
7087 }
7088 
7089 #define hpsa_noop(p) hpsa_message(p, 3, 0)
7090 
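/* Reset the controller, either by writing the doorbell register or by
 * bouncing the device through the D3hot/D0 PCI power states.
 */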
7091 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7092 	void __iomem *vaddr, u32 use_doorbell)
7093 {
7094 
7095 	if (use_doorbell) {
7096 		/* For everything after the P600, the PCI power state method
7097 		 * of resetting the controller doesn't work, so we have this
7098 		 * other way using the doorbell register.
7099 		 */
7100 		dev_info(&pdev->dev, "using doorbell to reset controller\n");
7101 		writel(use_doorbell, vaddr + SA5_DOORBELL);
7102 
7103 		/* PMC hardware guys tell us we need a 10 second delay after
7104 		 * doorbell reset and before any attempt to talk to the board
7105 		 * at all to ensure that this actually works and doesn't fall
7106 		 * over in some weird corner cases.
7107 		 */
7108 		msleep(10000);
7109 	} else { /* Try to do it the PCI power state way */
7110 
7111 		/* Quoting from the Open CISS Specification: "The Power
7112 		 * Management Control/Status Register (CSR) controls the power
7113 		 * state of the device.  The normal operating state is D0,
7114 		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
7115 		 * the controller, place the interface device in D3 then to D0,
7116 		 * this causes a secondary PCI reset which will reset the
7117 		 * controller." */
7118 
7119 		int rc = 0;
7120 
7121 		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7122 
7123 		/* enter the D3hot power management state */
7124 		rc = pci_set_power_state(pdev, PCI_D3hot);
7125 		if (rc)
7126 			return rc;
7127 
7128 		msleep(500);
7129 
7130 		/* enter the D0 power management state */
7131 		rc = pci_set_power_state(pdev, PCI_D0);
7132 		if (rc)
7133 			return rc;
7134 
7135 		/*
7136 		 * The P600 requires a small delay when changing states.
7137 		 * Otherwise we may think the board did not reset and we bail.
7138 		 * This is for kdump only and is particular to the P600.
7139 		 */
7140 		msleep(500);
7141 	}
7142 	return 0;
7143 }
7144 
7145 static void init_driver_version(char *driver_version, int len)
7146 {
7147 	memset(driver_version, 0, len);
7148 	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7149 }
7150 
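/* Write the "hpsa <version>" string into the config table so that a
 * later read can tell whether a reset actually took effect.
 */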
7151 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7152 {
7153 	char *driver_version;
7154 	int i, size = sizeof(cfgtable->driver_version);
7155 
7156 	driver_version = kmalloc(size, GFP_KERNEL);
7157 	if (!driver_version)
7158 		return -ENOMEM;
7159 
7160 	init_driver_version(driver_version, size);
7161 	for (i = 0; i < size; i++)
7162 		writeb(driver_version[i], &cfgtable->driver_version[i]);
7163 	kfree(driver_version);
7164 	return 0;
7165 }
7166 
7167 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7168 					  unsigned char *driver_ver)
7169 {
7170 	int i;
7171 
7172 	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7173 		driver_ver[i] = readb(&cfgtable->driver_version[i]);
7174 }
7175 
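/* Returns nonzero if the driver version bytes in the config table still
 * match what we wrote before the reset, i.e. the reset did not happen.
 */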
7176 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7177 {
7178 
7179 	char *driver_ver, *old_driver_ver;
7180 	int rc, size = sizeof(cfgtable->driver_version);
7181 
7182 	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
7183 	if (!old_driver_ver)
7184 		return -ENOMEM;
7185 	driver_ver = old_driver_ver + size;
7186 
7187 	/* After a reset, the 32 bytes of "driver version" in the cfgtable
7188 	 * should have been changed, otherwise we know the reset failed.
7189 	 */
7190 	init_driver_version(old_driver_ver, size);
7191 	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7192 	rc = !memcmp(driver_ver, old_driver_ver, size);
7193 	kfree(old_driver_ver);
7194 	return rc;
7195 }
7196 /* This does a hard reset of the controller using PCI power management
7197  * states or the doorbell register.
7198  */
7199 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7200 {
7201 	u64 cfg_offset;
7202 	u32 cfg_base_addr;
7203 	u64 cfg_base_addr_index;
7204 	void __iomem *vaddr;
7205 	unsigned long paddr;
7206 	u32 misc_fw_support;
7207 	int rc;
7208 	struct CfgTable __iomem *cfgtable;
7209 	u32 use_doorbell;
7210 	u16 command_register;
7211 
7212 	/* For controllers as old as the P600, this is very nearly
7213 	 * the same thing as
7214 	 *
7215 	 * pci_save_state(pci_dev);
7216 	 * pci_set_power_state(pci_dev, PCI_D3hot);
7217 	 * pci_set_power_state(pci_dev, PCI_D0);
7218 	 * pci_restore_state(pci_dev);
7219 	 *
7220 	 * For controllers newer than the P600, the pci power state
7221 	 * method of resetting doesn't work so we have another way
7222 	 * using the doorbell register.
7223 	 */
7224 
7225 	if (!ctlr_is_resettable(board_id)) {
7226 		dev_warn(&pdev->dev, "Controller not resettable\n");
7227 		return -ENODEV;
7228 	}
7229 
7230 	/* if controller is soft- but not hard resettable... */
7231 	if (!ctlr_is_hard_resettable(board_id))
7232 		return -ENOTSUPP; /* try soft reset later. */
7233 
7234 	/* Save the PCI command register */
7235 	pci_read_config_word(pdev, 4, &command_register);
7236 	pci_save_state(pdev);
7237 
7238 	/* find the first memory BAR, so we can find the cfg table */
7239 	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7240 	if (rc)
7241 		return rc;
7242 	vaddr = remap_pci_mem(paddr, 0x250);
7243 	if (!vaddr)
7244 		return -ENOMEM;
7245 
7246 	/* find cfgtable in order to check if reset via doorbell is supported */
7247 	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7248 					&cfg_base_addr_index, &cfg_offset);
7249 	if (rc)
7250 		goto unmap_vaddr;
7251 	cfgtable = remap_pci_mem(pci_resource_start(pdev,
7252 		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7253 	if (!cfgtable) {
7254 		rc = -ENOMEM;
7255 		goto unmap_vaddr;
7256 	}
7257 	rc = write_driver_ver_to_cfgtable(cfgtable);
7258 	if (rc)
7259 		goto unmap_cfgtable;
7260 
7261 	/* If reset via doorbell register is supported, use that.
7262 	 * There are two such methods.  Favor the newest method.
7263 	 */
7264 	misc_fw_support = readl(&cfgtable->misc_fw_support);
7265 	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7266 	if (use_doorbell) {
7267 		use_doorbell = DOORBELL_CTLR_RESET2;
7268 	} else {
7269 		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7270 		if (use_doorbell) {
7271 			dev_warn(&pdev->dev,
7272 				"Soft reset not supported. Firmware update is required.\n");
7273 			rc = -ENOTSUPP; /* try soft reset */
7274 			goto unmap_cfgtable;
7275 		}
7276 	}
7277 
7278 	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7279 	if (rc)
7280 		goto unmap_cfgtable;
7281 
7282 	pci_restore_state(pdev);
7283 	pci_write_config_word(pdev, 4, command_register);
7284 
7285 	/* Some devices (notably the HP Smart Array 5i Controller)
7286 	   need a little pause here */
7287 	msleep(HPSA_POST_RESET_PAUSE_MSECS);
7288 
7289 	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7290 	if (rc) {
7291 		dev_warn(&pdev->dev,
7292 			"Failed waiting for board to become ready after hard reset\n");
7293 		goto unmap_cfgtable;
7294 	}
7295 
7296 	rc = controller_reset_failed(vaddr);
7297 	if (rc < 0)
7298 		goto unmap_cfgtable;
7299 	if (rc) {
7300 		dev_warn(&pdev->dev, "Unable to successfully reset "
7301 			"controller. Will try soft reset.\n");
7302 		rc = -ENOTSUPP;
7303 	} else {
7304 		dev_info(&pdev->dev, "board ready after hard reset.\n");
7305 	}
7306 
7307 unmap_cfgtable:
7308 	iounmap(cfgtable);
7309 
7310 unmap_vaddr:
7311 	iounmap(vaddr);
7312 	return rc;
7313 }
7314 
7315 /*
7316  *  We cannot read the structure directly, for portability we must use
7317  *   the io functions.
7318  *   This is for debug only.
7319  */
7320 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7321 {
7322 #ifdef HPSA_DEBUG
7323 	int i;
7324 	char temp_name[17];
7325 
7326 	dev_info(dev, "Controller Configuration information\n");
7327 	dev_info(dev, "------------------------------------\n");
7328 	for (i = 0; i < 4; i++)
7329 		temp_name[i] = readb(&(tb->Signature[i]));
7330 	temp_name[4] = '\0';
7331 	dev_info(dev, "   Signature = %s\n", temp_name);
7332 	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
7333 	dev_info(dev, "   Transport methods supported = 0x%x\n",
7334 	       readl(&(tb->TransportSupport)));
7335 	dev_info(dev, "   Transport methods active = 0x%x\n",
7336 	       readl(&(tb->TransportActive)));
7337 	dev_info(dev, "   Requested transport Method = 0x%x\n",
7338 	       readl(&(tb->HostWrite.TransportRequest)));
7339 	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
7340 	       readl(&(tb->HostWrite.CoalIntDelay)));
7341 	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
7342 	       readl(&(tb->HostWrite.CoalIntCount)));
7343 	dev_info(dev, "   Max outstanding commands = %d\n",
7344 	       readl(&(tb->CmdsOutMax)));
7345 	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7346 	for (i = 0; i < 16; i++)
7347 		temp_name[i] = readb(&(tb->ServerName[i]));
7348 	temp_name[16] = '\0';
7349 	dev_info(dev, "   Server Name = %s\n", temp_name);
7350 	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
7351 		readl(&(tb->HeartBeat)));
7352 #endif				/* HPSA_DEBUG */
7353 }
7354 
7355 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7356 {
7357 	int i, offset, mem_type, bar_type;
7358 
7359 	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
7360 		return 0;
7361 	offset = 0;
7362 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7363 		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7364 		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7365 			offset += 4;
7366 		else {
7367 			mem_type = pci_resource_flags(pdev, i) &
7368 			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7369 			switch (mem_type) {
7370 			case PCI_BASE_ADDRESS_MEM_TYPE_32:
7371 			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7372 				offset += 4;	/* 32 bit */
7373 				break;
7374 			case PCI_BASE_ADDRESS_MEM_TYPE_64:
7375 				offset += 8;
7376 				break;
7377 			default:	/* reserved in PCI 2.2 */
7378 				dev_warn(&pdev->dev,
7379 				       "base address is invalid\n");
7380 				return -1;
7381 				break;
7382 			}
7383 		}
7384 		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7385 			return i + 1;
7386 	}
7387 	return -1;
7388 }
7389 
7390 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7391 {
7392 	if (h->msix_vector) {
7393 		if (h->pdev->msix_enabled)
7394 			pci_disable_msix(h->pdev);
7395 		h->msix_vector = 0;
7396 	} else if (h->msi_vector) {
7397 		if (h->pdev->msi_enabled)
7398 			pci_disable_msi(h->pdev);
7399 		h->msi_vector = 0;
7400 	}
7401 }
7402 
7403 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7404  * controllers that are capable. If not, we use legacy INTx mode.
7405  */
7406 static void hpsa_interrupt_mode(struct ctlr_info *h)
7407 {
7408 #ifdef CONFIG_PCI_MSI
7409 	int err, i;
7410 	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
7411 
7412 	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
7413 		hpsa_msix_entries[i].vector = 0;
7414 		hpsa_msix_entries[i].entry = i;
7415 	}
7416 
7417 	/* Some boards advertise MSI but don't really support it */
7418 	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
7419 	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
7420 		goto default_int_mode;
7421 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
7422 		dev_info(&h->pdev->dev, "MSI-X capable controller\n");
7423 		h->msix_vector = MAX_REPLY_QUEUES;
7424 		if (h->msix_vector > num_online_cpus())
7425 			h->msix_vector = num_online_cpus();
7426 		err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
7427 					    1, h->msix_vector);
7428 		if (err < 0) {
7429 			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
7430 			h->msix_vector = 0;
7431 			goto single_msi_mode;
7432 		} else if (err < h->msix_vector) {
7433 			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
7434 			       "available\n", err);
7435 		}
7436 		h->msix_vector = err;
7437 		for (i = 0; i < h->msix_vector; i++)
7438 			h->intr[i] = hpsa_msix_entries[i].vector;
7439 		return;
7440 	}
7441 single_msi_mode:
7442 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
7443 		dev_info(&h->pdev->dev, "MSI capable controller\n");
7444 		if (!pci_enable_msi(h->pdev))
7445 			h->msi_vector = 1;
7446 		else
7447 			dev_warn(&h->pdev->dev, "MSI init failed\n");
7448 	}
7449 default_int_mode:
7450 #endif				/* CONFIG_PCI_MSI */
7451 	/* if we get here we're going to use the default interrupt mode */
7452 	h->intr[h->intr_mode] = h->pdev->irq;
7453 }
7454 
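/* Map the PCI subsystem IDs to an index into the products[] table.
 * Unknown boards are accepted only if hpsa_allow_any is set.
 */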
7455 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
7456 {
7457 	int i;
7458 	u32 subsystem_vendor_id, subsystem_device_id;
7459 
7460 	subsystem_vendor_id = pdev->subsystem_vendor;
7461 	subsystem_device_id = pdev->subsystem_device;
7462 	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7463 		    subsystem_vendor_id;
7464 
7465 	for (i = 0; i < ARRAY_SIZE(products); i++)
7466 		if (*board_id == products[i].board_id)
7467 			return i;
7468 
7469 	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
7470 		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
7471 		!hpsa_allow_any) {
7472 		dev_warn(&pdev->dev, "unrecognized board ID: "
7473 			"0x%08x, ignoring.\n", *board_id);
7474 		return -ENODEV;
7475 	}
7476 	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7477 }
7478 
7479 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7480 				    unsigned long *memory_bar)
7481 {
7482 	int i;
7483 
7484 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7485 		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7486 			/* addressing mode bits already removed */
7487 			*memory_bar = pci_resource_start(pdev, i);
7488 			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7489 				*memory_bar);
7490 			return 0;
7491 		}
7492 	dev_warn(&pdev->dev, "no memory BAR found\n");
7493 	return -ENODEV;
7494 }
7495 
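/* Poll the scratchpad register until the firmware reports the requested
 * ready (or not-ready) state, or give up after a fixed number of tries.
 */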
7496 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7497 				     int wait_for_ready)
7498 {
7499 	int i, iterations;
7500 	u32 scratchpad;
7501 	if (wait_for_ready)
7502 		iterations = HPSA_BOARD_READY_ITERATIONS;
7503 	else
7504 		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7505 
7506 	for (i = 0; i < iterations; i++) {
7507 		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7508 		if (wait_for_ready) {
7509 			if (scratchpad == HPSA_FIRMWARE_READY)
7510 				return 0;
7511 		} else {
7512 			if (scratchpad != HPSA_FIRMWARE_READY)
7513 				return 0;
7514 		}
7515 		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7516 	}
7517 	dev_warn(&pdev->dev, "board not ready, timed out.\n");
7518 	return -ENODEV;
7519 }
7520 
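/* Read the BAR index and offset of the CISS config table from the
 * controller's register space.
 */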
7521 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7522 			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7523 			       u64 *cfg_offset)
7524 {
7525 	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7526 	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7527 	*cfg_base_addr &= (u32) 0x0000ffff;
7528 	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7529 	if (*cfg_base_addr_index == -1) {
7530 		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7531 		return -ENODEV;
7532 	}
7533 	return 0;
7534 }
7535 
7536 static void hpsa_free_cfgtables(struct ctlr_info *h)
7537 {
7538 	if (h->transtable) {
7539 		iounmap(h->transtable);
7540 		h->transtable = NULL;
7541 	}
7542 	if (h->cfgtable) {
7543 		iounmap(h->cfgtable);
7544 		h->cfgtable = NULL;
7545 	}
7546 }
7547 
7548 /* Find and map CISS config table and transfer table;
7549  * several items must be unmapped (freed) later.
7550  */
7551 static int hpsa_find_cfgtables(struct ctlr_info *h)
7552 {
7553 	u64 cfg_offset;
7554 	u32 cfg_base_addr;
7555 	u64 cfg_base_addr_index;
7556 	u32 trans_offset;
7557 	int rc;
7558 
7559 	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7560 		&cfg_base_addr_index, &cfg_offset);
7561 	if (rc)
7562 		return rc;
7563 	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7564 		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7565 	if (!h->cfgtable) {
7566 		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7567 		return -ENOMEM;
7568 	}
7569 	rc = write_driver_ver_to_cfgtable(h->cfgtable);
7570 	if (rc)
7571 		return rc;
7572 	/* Find performant mode table. */
7573 	trans_offset = readl(&h->cfgtable->TransMethodOffset);
7574 	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7575 				cfg_base_addr_index)+cfg_offset+trans_offset,
7576 				sizeof(*h->transtable));
7577 	if (!h->transtable) {
7578 		dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7579 		hpsa_free_cfgtables(h);
7580 		return -ENOMEM;
7581 	}
7582 	return 0;
7583 }
7584 
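/* Read the controller's command slot limit, clamping it for the
 * memory-constrained kdump case and enforcing a sane minimum.
 */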
7585 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7586 {
7587 #define MIN_MAX_COMMANDS 16
7588 	BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7589 
7590 	h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7591 
7592 	/* Limit commands in memory limited kdump scenario. */
7593 	if (reset_devices && h->max_commands > 32)
7594 		h->max_commands = 32;
7595 
7596 	if (h->max_commands < MIN_MAX_COMMANDS) {
7597 		dev_warn(&h->pdev->dev,
7598 			"Controller reports max supported commands of %d; using %d instead. Ensure that firmware is up to date.\n",
7599 			h->max_commands,
7600 			MIN_MAX_COMMANDS);
7601 		h->max_commands = MIN_MAX_COMMANDS;
7602 	}
7603 }
7604 
7605 /* If the controller reports that the total max sg entries is greater than 512,
7606  * then we know that chained SG blocks work.  (Original smart arrays did not
7607  * support chained SG blocks and would return zero for max sg entries.)
7608  */
7609 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7610 {
7611 	return h->maxsgentries > 512;
7612 }
7613 
7614 /* Interrogate the hardware for some limits:
7615  * max commands, max SG elements without chaining, and with chaining,
7616  * SG chain block size, etc.
7617  */
7618 static void hpsa_find_board_params(struct ctlr_info *h)
7619 {
7620 	hpsa_get_max_perf_mode_cmds(h);
7621 	h->nr_cmds = h->max_commands;
7622 	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7623 	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7624 	if (hpsa_supports_chained_sg_blocks(h)) {
7625 		/* Limit in-command s/g elements to 32 to save DMA'able memory. */
7626 		h->max_cmd_sg_entries = 32;
7627 		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7628 		h->maxsgentries--; /* save one for chain pointer */
7629 	} else {
7630 		/*
7631 		 * Original smart arrays supported at most 31 s/g entries
7632 		 * embedded inline in the command (trying to use more
7633 		 * would lock up the controller)
7634 		 */
7635 		h->max_cmd_sg_entries = 31;
7636 		h->maxsgentries = 31; /* default to traditional values */
7637 		h->chainsize = 0;
7638 	}
7639 
7640 	/* Find out what task management functions are supported and cache */
7641 	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7642 	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7643 		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7644 	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7645 		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7646 	if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7647 		dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7648 }
7649 
7650 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7651 {
7652 	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7653 		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7654 		return false;
7655 	}
7656 	return true;
7657 }
7658 
7659 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7660 {
7661 	u32 driver_support;
7662 
7663 	driver_support = readl(&(h->cfgtable->driver_support));
7664 	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
7665 #ifdef CONFIG_X86
7666 	driver_support |= ENABLE_SCSI_PREFETCH;
7667 #endif
7668 	driver_support |= ENABLE_UNIT_ATTN;
7669 	writel(driver_support, &(h->cfgtable->driver_support));
7670 }
7671 
7672 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
7673  * in a prefetch beyond physical memory.
7674  */
7675 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7676 {
7677 	u32 dma_prefetch;
7678 
7679 	if (h->board_id != 0x3225103C)
7680 		return;
7681 	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7682 	dma_prefetch |= 0x8000;
7683 	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7684 }
7685 
7686 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7687 {
7688 	int i;
7689 	u32 doorbell_value;
7690 	unsigned long flags;
7691 	/* wait until the clear_event_notify bit 6 is cleared by controller. */
7692 	for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7693 		spin_lock_irqsave(&h->lock, flags);
7694 		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7695 		spin_unlock_irqrestore(&h->lock, flags);
7696 		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7697 			goto done;
7698 		/* delay and try again */
7699 		msleep(CLEAR_EVENT_WAIT_INTERVAL);
7700 	}
7701 	return -ENODEV;
7702 done:
7703 	return 0;
7704 }
7705 
7706 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7707 {
7708 	int i;
7709 	u32 doorbell_value;
7710 	unsigned long flags;
7711 
7712 	/* Under certain very rare conditions, this can take a while.
7713 	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7714 	 * as we enter this code.)
7715 	 */
7716 	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7717 		if (h->remove_in_progress)
7718 			goto done;
7719 		spin_lock_irqsave(&h->lock, flags);
7720 		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7721 		spin_unlock_irqrestore(&h->lock, flags);
7722 		if (!(doorbell_value & CFGTBL_ChangeReq))
7723 			goto done;
7724 		/* delay and try again */
7725 		msleep(MODE_CHANGE_WAIT_INTERVAL);
7726 	}
7727 	return -ENODEV;
7728 done:
7729 	return 0;
7730 }
7731 
7732 /* return -ENODEV or other reason on error, 0 on success */
7733 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7734 {
7735 	u32 trans_support;
7736 
7737 	trans_support = readl(&(h->cfgtable->TransportSupport));
7738 	if (!(trans_support & SIMPLE_MODE))
7739 		return -ENOTSUPP;
7740 
7741 	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7742 
7743 	/* Update the field, and then ring the doorbell */
7744 	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7745 	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7746 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7747 	if (hpsa_wait_for_mode_change_ack(h))
7748 		goto error;
7749 	print_cfg_table(&h->pdev->dev, h->cfgtable);
7750 	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7751 		goto error;
7752 	h->transMethod = CFGTBL_Trans_Simple;
7753 	return 0;
7754 error:
7755 	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7756 	return -ENODEV;
7757 }
7758 
7759 /* free items allocated or mapped by hpsa_pci_init */
7760 static void hpsa_free_pci_init(struct ctlr_info *h)
7761 {
7762 	hpsa_free_cfgtables(h);			/* pci_init 4 */
7763 	iounmap(h->vaddr);			/* pci_init 3 */
7764 	h->vaddr = NULL;
7765 	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
7766 	/*
7767 	 * call pci_disable_device before pci_release_regions per
7768 	 * Documentation/PCI/pci.txt
7769 	 */
7770 	pci_disable_device(h->pdev);		/* pci_init 1 */
7771 	pci_release_regions(h->pdev);		/* pci_init 2 */
7772 }
7773 
7774 /* several items must be freed later */
7775 static int hpsa_pci_init(struct ctlr_info *h)
7776 {
7777 	int prod_index, err;
7778 
7779 	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7780 	if (prod_index < 0)
7781 		return prod_index;
7782 	h->product_name = products[prod_index].product_name;
7783 	h->access = *(products[prod_index].access);
7784 
7785 	h->needs_abort_tags_swizzled =
7786 		ctlr_needs_abort_tags_swizzled(h->board_id);
7787 
7788 	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7789 			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7790 
7791 	err = pci_enable_device(h->pdev);
7792 	if (err) {
7793 		dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7794 		pci_disable_device(h->pdev);
7795 		return err;
7796 	}
7797 
7798 	err = pci_request_regions(h->pdev, HPSA);
7799 	if (err) {
7800 		dev_err(&h->pdev->dev,
7801 			"failed to obtain PCI resources\n");
7802 		pci_disable_device(h->pdev);
7803 		return err;
7804 	}
7805 
7806 	pci_set_master(h->pdev);
7807 
7808 	hpsa_interrupt_mode(h);
7809 	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7810 	if (err)
7811 		goto clean2;	/* intmode+region, pci */
7812 	h->vaddr = remap_pci_mem(h->paddr, 0x250);
7813 	if (!h->vaddr) {
7814 		dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7815 		err = -ENOMEM;
7816 		goto clean2;	/* intmode+region, pci */
7817 	}
7818 	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7819 	if (err)
7820 		goto clean3;	/* vaddr, intmode+region, pci */
7821 	err = hpsa_find_cfgtables(h);
7822 	if (err)
7823 		goto clean3;	/* vaddr, intmode+region, pci */
7824 	hpsa_find_board_params(h);
7825 
7826 	if (!hpsa_CISS_signature_present(h)) {
7827 		err = -ENODEV;
7828 		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
7829 	}
7830 	hpsa_set_driver_support_bits(h);
7831 	hpsa_p600_dma_prefetch_quirk(h);
7832 	err = hpsa_enter_simple_mode(h);
7833 	if (err)
7834 		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
7835 	return 0;
7836 
7837 clean4:	/* cfgtables, vaddr, intmode+region, pci */
7838 	hpsa_free_cfgtables(h);
7839 clean3:	/* vaddr, intmode+region, pci */
7840 	iounmap(h->vaddr);
7841 	h->vaddr = NULL;
7842 clean2:	/* intmode+region, pci */
7843 	hpsa_disable_interrupt_mode(h);
7844 	/*
7845 	 * call pci_disable_device before pci_release_regions per
7846 	 * Documentation/PCI/pci.txt
7847 	 */
7848 	pci_disable_device(h->pdev);
7849 	pci_release_regions(h->pdev);
7850 	return err;
7851 }
7852 
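/* Cache the controller's standard INQUIRY data; failure here is not
 * fatal, we just end up with hba_inquiry_data == NULL.
 */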
7853 static void hpsa_hba_inquiry(struct ctlr_info *h)
7854 {
7855 	int rc;
7856 
7857 #define HBA_INQUIRY_BYTE_COUNT 64
7858 	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7859 	if (!h->hba_inquiry_data)
7860 		return;
7861 	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7862 		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7863 	if (rc != 0) {
7864 		kfree(h->hba_inquiry_data);
7865 		h->hba_inquiry_data = NULL;
7866 	}
7867 }
7868 
7869 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7870 {
7871 	int rc, i;
7872 	void __iomem *vaddr;
7873 
7874 	if (!reset_devices)
7875 		return 0;
7876 
7877 	/* The kdump kernel is loading and we don't know what state the
7878 	 * PCI interface is in.  dev->enable_cnt is zero, so we call
7879 	 * enable+disable, wait a while, and switch it on.
7880 	 */
7881 	rc = pci_enable_device(pdev);
7882 	if (rc) {
7883 		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7884 		return -ENODEV;
7885 	}
7886 	pci_disable_device(pdev);
7887 	msleep(260);			/* a randomly chosen number */
7888 	rc = pci_enable_device(pdev);
7889 	if (rc) {
7890 		dev_warn(&pdev->dev, "failed to enable device.\n");
7891 		return -ENODEV;
7892 	}
7893 
7894 	pci_set_master(pdev);
7895 
7896 	vaddr = pci_ioremap_bar(pdev, 0);
7897 	if (vaddr == NULL) {
7898 		rc = -ENOMEM;
7899 		goto out_disable;
7900 	}
7901 	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7902 	iounmap(vaddr);
7903 
7904 	/* Reset the controller with a PCI power-cycle or via doorbell */
7905 	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7906 
7907 	/* -ENOTSUPP here means we cannot reset the controller
7908 	 * but it's already (and still) up and running in
7909 	 * "performant mode".  Or, it might be 640x, which can't reset
7910 	 * due to concerns about shared bbwc between 6402/6404 pair.
7911 	 */
7912 	if (rc)
7913 		goto out_disable;
7914 
7915 	/* Now try to get the controller to respond to a no-op */
7916 	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7917 	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7918 		if (hpsa_noop(pdev) == 0)
7919 			break;
7920 		else
7921 			dev_warn(&pdev->dev, "no-op failed%s\n",
7922 					(i < 11 ? "; re-trying" : ""));
7923 	}
7924 
7925 out_disable:
7926 
7927 	pci_disable_device(pdev);
7928 	return rc;
7929 }
7930 
7931 static void hpsa_free_cmd_pool(struct ctlr_info *h)
7932 {
7933 	kfree(h->cmd_pool_bits);
7934 	h->cmd_pool_bits = NULL;
7935 	if (h->cmd_pool) {
7936 		pci_free_consistent(h->pdev,
7937 				h->nr_cmds * sizeof(struct CommandList),
7938 				h->cmd_pool,
7939 				h->cmd_pool_dhandle);
7940 		h->cmd_pool = NULL;
7941 		h->cmd_pool_dhandle = 0;
7942 	}
7943 	if (h->errinfo_pool) {
7944 		pci_free_consistent(h->pdev,
7945 				h->nr_cmds * sizeof(struct ErrorInfo),
7946 				h->errinfo_pool,
7947 				h->errinfo_pool_dhandle);
7948 		h->errinfo_pool = NULL;
7949 		h->errinfo_pool_dhandle = 0;
7950 	}
7951 }
7952 
7953 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7954 {
7955 	h->cmd_pool_bits = kzalloc(
7956 		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7957 		sizeof(unsigned long), GFP_KERNEL);
7958 	h->cmd_pool = pci_alloc_consistent(h->pdev,
7959 		    h->nr_cmds * sizeof(*h->cmd_pool),
7960 		    &(h->cmd_pool_dhandle));
7961 	h->errinfo_pool = pci_alloc_consistent(h->pdev,
7962 		    h->nr_cmds * sizeof(*h->errinfo_pool),
7963 		    &(h->errinfo_pool_dhandle));
7964 	if ((h->cmd_pool_bits == NULL)
7965 	    || (h->cmd_pool == NULL)
7966 	    || (h->errinfo_pool == NULL)) {
7967 		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
7968 		goto clean_up;
7969 	}
7970 	hpsa_preinitialize_commands(h);
7971 	return 0;
7972 clean_up:
7973 	hpsa_free_cmd_pool(h);
7974 	return -ENOMEM;
7975 }
7976 
7977 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7978 {
7979 	int i, cpu;
7980 
7981 	cpu = cpumask_first(cpu_online_mask);
7982 	for (i = 0; i < h->msix_vector; i++) {
7983 		irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
7984 		cpu = cpumask_next(cpu, cpu_online_mask);
7985 	}
7986 }
7987 
7988 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7989 static void hpsa_free_irqs(struct ctlr_info *h)
7990 {
7991 	int i;
7992 
7993 	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7994 		/* Single reply queue, only one irq to free */
7995 		i = h->intr_mode;
7996 		irq_set_affinity_hint(h->intr[i], NULL);
7997 		free_irq(h->intr[i], &h->q[i]);
7998 		h->q[i] = 0;
7999 		return;
8000 	}
8001 
8002 	for (i = 0; i < h->msix_vector; i++) {
8003 		irq_set_affinity_hint(h->intr[i], NULL);
8004 		free_irq(h->intr[i], &h->q[i]);
8005 		h->q[i] = 0;
8006 	}
8007 	for (; i < MAX_REPLY_QUEUES; i++)
8008 		h->q[i] = 0;
8009 }
8010 
8011 /* returns 0 on success; cleans up and returns -Enn on error */
8012 static int hpsa_request_irqs(struct ctlr_info *h,
8013 	irqreturn_t (*msixhandler)(int, void *),
8014 	irqreturn_t (*intxhandler)(int, void *))
8015 {
8016 	int rc, i;
8017 
8018 	/*
8019 	 * initialize h->q[x] = x so that interrupt handlers know which
8020 	 * queue to process.
8021 	 */
8022 	for (i = 0; i < MAX_REPLY_QUEUES; i++)
8023 		h->q[i] = (u8) i;
8024 
8025 	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
8026 		/* If performant mode and MSI-X, use multiple reply queues */
8027 		for (i = 0; i < h->msix_vector; i++) {
8028 			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8029 			rc = request_irq(h->intr[i], msixhandler,
8030 					0, h->intrname[i],
8031 					&h->q[i]);
8032 			if (rc) {
8033 				int j;
8034 
8035 				dev_err(&h->pdev->dev,
8036 					"failed to get irq %d for %s\n",
8037 				       h->intr[i], h->devname);
8038 				for (j = 0; j < i; j++) {
8039 					free_irq(h->intr[j], &h->q[j]);
8040 					h->q[j] = 0;
8041 				}
8042 				for (; j < MAX_REPLY_QUEUES; j++)
8043 					h->q[j] = 0;
8044 				return rc;
8045 			}
8046 		}
8047 		hpsa_irq_affinity_hints(h);
8048 	} else {
8049 		/* Use single reply pool */
8050 		if (h->msix_vector > 0 || h->msi_vector) {
8051 			if (h->msix_vector)
8052 				sprintf(h->intrname[h->intr_mode],
8053 					"%s-msix", h->devname);
8054 			else
8055 				sprintf(h->intrname[h->intr_mode],
8056 					"%s-msi", h->devname);
8057 			rc = request_irq(h->intr[h->intr_mode],
8058 				msixhandler, 0,
8059 				h->intrname[h->intr_mode],
8060 				&h->q[h->intr_mode]);
8061 		} else {
8062 			sprintf(h->intrname[h->intr_mode],
8063 				"%s-intx", h->devname);
8064 			rc = request_irq(h->intr[h->intr_mode],
8065 				intxhandler, IRQF_SHARED,
8066 				h->intrname[h->intr_mode],
8067 				&h->q[h->intr_mode]);
8068 		}
8069 		irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
8070 	}
8071 	if (rc) {
8072 		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8073 		       h->intr[h->intr_mode], h->devname);
8074 		hpsa_free_irqs(h);
8075 		return -ENODEV;
8076 	}
8077 	return 0;
8078 }
8079 
8080 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8081 {
8082 	int rc;
8083 	hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
8084 
8085 	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8086 	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8087 	if (rc) {
8088 		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8089 		return rc;
8090 	}
8091 
8092 	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8093 	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8094 	if (rc) {
8095 		dev_warn(&h->pdev->dev, "Board failed to become ready "
8096 			"after soft reset.\n");
8097 		return rc;
8098 	}
8099 
8100 	return 0;
8101 }
8102 
8103 static void hpsa_free_reply_queues(struct ctlr_info *h)
8104 {
8105 	int i;
8106 
8107 	for (i = 0; i < h->nreply_queues; i++) {
8108 		if (!h->reply_queue[i].head)
8109 			continue;
8110 		pci_free_consistent(h->pdev,
8111 					h->reply_queue_size,
8112 					h->reply_queue[i].head,
8113 					h->reply_queue[i].busaddr);
8114 		h->reply_queue[i].head = NULL;
8115 		h->reply_queue[i].busaddr = 0;
8116 	}
8117 	h->reply_queue_size = 0;
8118 }
8119 
8120 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8121 {
8122 	hpsa_free_performant_mode(h);		/* init_one 7 */
8123 	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
8124 	hpsa_free_cmd_pool(h);			/* init_one 5 */
8125 	hpsa_free_irqs(h);			/* init_one 4 */
8126 	scsi_host_put(h->scsi_host);		/* init_one 3 */
8127 	h->scsi_host = NULL;			/* init_one 3 */
8128 	hpsa_free_pci_init(h);			/* init_one 2_5 */
8129 	free_percpu(h->lockup_detected);	/* init_one 2 */
8130 	h->lockup_detected = NULL;		/* init_one 2 */
8131 	if (h->resubmit_wq) {
8132 		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
8133 		h->resubmit_wq = NULL;
8134 	}
8135 	if (h->rescan_ctlr_wq) {
8136 		destroy_workqueue(h->rescan_ctlr_wq);
8137 		h->rescan_ctlr_wq = NULL;
8138 	}
8139 	kfree(h);				/* init_one 1 */
8140 }
8141 
8142 /* Called when controller lockup detected. */
8143 static void fail_all_outstanding_cmds(struct ctlr_info *h)
8144 {
8145 	int i, refcount;
8146 	struct CommandList *c;
8147 	int failcount = 0;
8148 
8149 	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
8150 	for (i = 0; i < h->nr_cmds; i++) {
8151 		c = h->cmd_pool + i;
8152 		refcount = atomic_inc_return(&c->refcount);
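		/* refcount > 1 means this slot holds an outstanding command */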
8153 		if (refcount > 1) {
8154 			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8155 			finish_cmd(c);
8156 			atomic_dec(&h->commands_outstanding);
8157 			failcount++;
8158 		}
8159 		cmd_free(h, c);
8160 	}
8161 	dev_warn(&h->pdev->dev,
8162 		"failed %d commands in fail_all\n", failcount);
8163 }
8164 
8165 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8166 {
8167 	int cpu;
8168 
8169 	for_each_online_cpu(cpu) {
8170 		u32 *lockup_detected;
8171 		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8172 		*lockup_detected = value;
8173 	}
8174 	wmb(); /* be sure the per-cpu variables are out to memory */
8175 }
8176 
8177 static void controller_lockup_detected(struct ctlr_info *h)
8178 {
8179 	unsigned long flags;
8180 	u32 lockup_detected;
8181 
8182 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
8183 	spin_lock_irqsave(&h->lock, flags);
8184 	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8185 	if (!lockup_detected) {
8186 		/* no heartbeat, but controller gave us a zero. */
8187 		dev_warn(&h->pdev->dev,
8188 			"lockup detected after %d but scratchpad register is zero\n",
8189 			h->heartbeat_sample_interval / HZ);
8190 		lockup_detected = 0xffffffff;
8191 	}
8192 	set_lockup_detected_for_all_cpus(h, lockup_detected);
8193 	spin_unlock_irqrestore(&h->lock, flags);
8194 	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
8195 			lockup_detected, h->heartbeat_sample_interval / HZ);
8196 	pci_disable_device(h->pdev);
8197 	fail_all_outstanding_cmds(h);
8198 }
8199 
8200 static int detect_controller_lockup(struct ctlr_info *h)
8201 {
8202 	u64 now;
8203 	u32 heartbeat;
8204 	unsigned long flags;
8205 
8206 	now = get_jiffies_64();
8207 	/* If we've received an interrupt recently, we're ok. */
8208 	if (time_after64(h->last_intr_timestamp +
8209 				(h->heartbeat_sample_interval), now))
8210 		return false;
8211 
8212 	/*
8213 	 * If we've already checked the heartbeat recently, we're ok.
8214 	 * This could happen if someone sends us a signal. We
8215 	 * otherwise don't care about signals in this thread.
8216 	 */
8217 	if (time_after64(h->last_heartbeat_timestamp +
8218 				(h->heartbeat_sample_interval), now))
8219 		return false;
8220 
8221 	/* If heartbeat has not changed since we last looked, we're not ok. */
8222 	spin_lock_irqsave(&h->lock, flags);
8223 	heartbeat = readl(&h->cfgtable->HeartBeat);
8224 	spin_unlock_irqrestore(&h->lock, flags);
8225 	if (h->last_heartbeat == heartbeat) {
8226 		controller_lockup_detected(h);
8227 		return true;
8228 	}
8229 
8230 	/* We're ok. */
8231 	h->last_heartbeat = heartbeat;
8232 	h->last_heartbeat_timestamp = now;
8233 	return false;
8234 }
8235 
8236 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8237 {
8238 	int i;
8239 	char *event_type;
8240 
8241 	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8242 		return;
8243 
8244 	/* Ask the controller to clear the events we're handling. */
8245 	if ((h->transMethod & (CFGTBL_Trans_io_accel1
8246 			| CFGTBL_Trans_io_accel2)) &&
8247 		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8248 		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8249 
8250 		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8251 			event_type = "state change";
8252 		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8253 			event_type = "configuration change";
8254 		/* Stop sending new RAID offload reqs via the IO accelerator */
8255 		scsi_block_requests(h->scsi_host);
8256 		for (i = 0; i < h->ndevices; i++)
8257 			h->dev[i]->offload_enabled = 0;
8258 		hpsa_drain_accel_commands(h);
8259 		/* Set 'accelerator path config change' bit */
8260 		dev_warn(&h->pdev->dev,
8261 			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8262 			h->events, event_type);
8263 		writel(h->events, &(h->cfgtable->clear_event_notify));
8264 		/* Set the "clear event notify field update" bit 6 */
8265 		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8266 		/* Wait until ctlr clears 'clear event notify field', bit 6 */
8267 		hpsa_wait_for_clear_event_notify_ack(h);
8268 		scsi_unblock_requests(h->scsi_host);
8269 	} else {
8270 		/* Acknowledge controller notification events. */
8271 		writel(h->events, &(h->cfgtable->clear_event_notify));
8272 		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8273 		hpsa_wait_for_clear_event_notify_ack(h);
8274 #if 0
8275 		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8276 		hpsa_wait_for_mode_change_ack(h);
8277 #endif
8278 	}
8279 	return;
8280 }
8281 
8282 /* Check a register on the controller to see if there are configuration
8283  * changes (added/changed/removed logical drives, etc.) which mean that
8284  * we should rescan the controller for devices.
8285  * Also check flag for driver-initiated rescan.
8286  */
8287 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8288 {
8289 	if (h->drv_req_rescan) {
8290 		h->drv_req_rescan = 0;
8291 		return 1;
8292 	}
8293 
8294 	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8295 		return 0;
8296 
8297 	h->events = readl(&(h->cfgtable->event_notify));
8298 	return h->events & RESCAN_REQUIRED_EVENT_BITS;
8299 }
8300 
8301 /*
8302  * Check if any of the offline devices have become ready
8303  */
8304 static int hpsa_offline_devices_ready(struct ctlr_info *h)
8305 {
8306 	unsigned long flags;
8307 	struct offline_device_entry *d;
8308 	struct list_head *this, *tmp;
8309 
8310 	spin_lock_irqsave(&h->offline_device_lock, flags);
8311 	list_for_each_safe(this, tmp, &h->offline_device_list) {
8312 		d = list_entry(this, struct offline_device_entry,
8313 				offline_list);
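		/* Drop the lock while checking the device (it issues commands);
		 * retake it before touching the list again.
		 */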
8314 		spin_unlock_irqrestore(&h->offline_device_lock, flags);
8315 		if (!hpsa_volume_offline(h, d->scsi3addr)) {
8316 			spin_lock_irqsave(&h->offline_device_lock, flags);
8317 			list_del(&d->offline_list);
8318 			spin_unlock_irqrestore(&h->offline_device_lock, flags);
8319 			return 1;
8320 		}
8321 		spin_lock_irqsave(&h->offline_device_lock, flags);
8322 	}
8323 	spin_unlock_irqrestore(&h->offline_device_lock, flags);
8324 	return 0;
8325 }
8326 
8327 static int hpsa_luns_changed(struct ctlr_info *h)
8328 {
8329 	int rc = 1; /* assume there are changes */
8330 	struct ReportLUNdata *logdev = NULL;
8331 
8332 	/* if we can't find out if lun data has changed,
8333 	 * assume that it has.
8334 	 */
8335 
8336 	if (!h->lastlogicals)
8337 		goto out;
8338 
8339 	logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8340 	if (!logdev) {
8341 		dev_warn(&h->pdev->dev,
8342 			"Out of memory, can't track lun changes.\n");
8343 		goto out;
8344 	}
8345 	if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8346 		dev_warn(&h->pdev->dev,
8347 			"report luns failed, can't track lun changes.\n");
8348 		goto out;
8349 	}
8350 	if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8351 		dev_info(&h->pdev->dev,
8352 			"Lun changes detected.\n");
8353 		memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8354 		goto out;
8355 	} else
8356 		rc = 0; /* no changes detected. */
8357 out:
8358 	kfree(logdev);
8359 	return rc;
8360 }
8361 
8362 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8363 {
8364 	unsigned long flags;
8365 	struct ctlr_info *h = container_of(to_delayed_work(work),
8366 					struct ctlr_info, rescan_ctlr_work);
8367 
8368 
8369 	if (h->remove_in_progress)
8370 		return;
8371 
8372 	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
8373 		scsi_host_get(h->scsi_host);
8374 		hpsa_ack_ctlr_events(h);
8375 		hpsa_scan_start(h->scsi_host);
8376 		scsi_host_put(h->scsi_host);
8377 	} else if (h->discovery_polling) {
8378 		hpsa_disable_rld_caching(h);
8379 		if (hpsa_luns_changed(h)) {
8380 			struct Scsi_Host *sh = NULL;
8381 
8382 			dev_info(&h->pdev->dev,
8383 				"driver discovery polling rescan.\n");
8384 			sh = scsi_host_get(h->scsi_host);
8385 			if (sh != NULL) {
8386 				hpsa_scan_start(sh);
8387 				scsi_host_put(sh);
8388 			}
8389 		}
8390 	}
8391 	spin_lock_irqsave(&h->lock, flags);
8392 	if (!h->remove_in_progress)
8393 		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8394 				h->heartbeat_sample_interval);
8395 	spin_unlock_irqrestore(&h->lock, flags);
8396 }
8397 
8398 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8399 {
8400 	unsigned long flags;
8401 	struct ctlr_info *h = container_of(to_delayed_work(work),
8402 					struct ctlr_info, monitor_ctlr_work);
8403 
8404 	detect_controller_lockup(h);
8405 	if (lockup_detected(h))
8406 		return;
8407 
8408 	spin_lock_irqsave(&h->lock, flags);
8409 	if (!h->remove_in_progress)
8410 		schedule_delayed_work(&h->monitor_ctlr_work,
8411 				h->heartbeat_sample_interval);
8412 	spin_unlock_irqrestore(&h->lock, flags);
8413 }
8414 
8415 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8416 						char *name)
8417 {
8418 	struct workqueue_struct *wq = NULL;
8419 
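	/* The format string comes first; "%s_%d_hpsa" expands to names
	 * like "rescan_0_hpsa" for controller 0.
	 */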
8420 	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8421 	if (!wq)
8422 		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8423 
8424 	return wq;
8425 }
8426 
8427 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8428 {
8429 	int dac, rc;
8430 	struct ctlr_info *h;
8431 	int try_soft_reset = 0;
8432 	unsigned long flags;
8433 	u32 board_id;
8434 
8435 	if (number_of_controllers == 0)
8436 		printk(KERN_INFO DRIVER_NAME "\n");
8437 
8438 	rc = hpsa_lookup_board_id(pdev, &board_id);
8439 	if (rc < 0) {
8440 		dev_warn(&pdev->dev, "Board ID not found\n");
8441 		return rc;
8442 	}
8443 
8444 	rc = hpsa_init_reset_devices(pdev, board_id);
8445 	if (rc) {
8446 		if (rc != -ENOTSUPP)
8447 			return rc;
8448 		/* If the reset fails in a particular way (it has no way to do
8449 		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
8450 		 * a soft reset once we get the controller configured up to the
8451 		 * point that it can accept a command.
8452 		 */
8453 		try_soft_reset = 1;
8454 		rc = 0;
8455 	}
8456 
8457 reinit_after_soft_reset:
8458 
8459 	/* Command structures must be aligned on a 32-byte boundary because
8460 	 * the 5 lower bits of the address are used by the hardware and by
8461 	 * the driver.  See comments in hpsa.h for more info.
8462 	 */
8463 	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8464 	h = kzalloc(sizeof(*h), GFP_KERNEL);
8465 	if (!h) {
8466 		dev_err(&pdev->dev, "Failed to allocate controller head\n");
8467 		return -ENOMEM;
8468 	}
8469 
8470 	h->pdev = pdev;
8471 
8472 	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8473 	INIT_LIST_HEAD(&h->offline_device_list);
8474 	spin_lock_init(&h->lock);
8475 	spin_lock_init(&h->offline_device_lock);
8476 	spin_lock_init(&h->scan_lock);
8477 	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8478 	atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
8479 
8480 	/* Allocate and clear per-cpu variable lockup_detected */
8481 	h->lockup_detected = alloc_percpu(u32);
8482 	if (!h->lockup_detected) {
8483 		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8484 		rc = -ENOMEM;
8485 		goto clean1;	/* aer/h */
8486 	}
8487 	set_lockup_detected_for_all_cpus(h, 0);
8488 
8489 	rc = hpsa_pci_init(h);
8490 	if (rc)
8491 		goto clean2;	/* lu, aer/h */
8492 
8493 	/* relies on h-> settings made by hpsa_pci_init, including
8494 	 * interrupt_mode h->intr */
8495 	rc = hpsa_scsi_host_alloc(h);
8496 	if (rc)
8497 		goto clean2_5;	/* pci, lu, aer/h */
8498 
8499 	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8500 	h->ctlr = number_of_controllers;
8501 	number_of_controllers++;
8502 
8503 	/* configure PCI DMA: prefer a 64-bit mask, fall back to 32-bit */
8504 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8505 	if (rc == 0) {
8506 		dac = 1;
8507 	} else {
8508 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8509 		if (rc == 0) {
8510 			dac = 0;
8511 		} else {
8512 			dev_err(&pdev->dev, "no suitable DMA available\n");
8513 			goto clean3;	/* shost, pci, lu, aer/h */
8514 		}
8515 	}
8516 
8517 	/* make sure the board interrupts are off */
8518 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
8519 
8520 	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8521 	if (rc)
8522 		goto clean3;	/* shost, pci, lu, aer/h */
8523 	rc = hpsa_alloc_cmd_pool(h);
8524 	if (rc)
8525 		goto clean4;	/* irq, shost, pci, lu, aer/h */
8526 	rc = hpsa_alloc_sg_chain_blocks(h);
8527 	if (rc)
8528 		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
8529 	init_waitqueue_head(&h->scan_wait_queue);
8530 	init_waitqueue_head(&h->abort_cmd_wait_queue);
8531 	init_waitqueue_head(&h->event_sync_wait_queue);
8532 	mutex_init(&h->reset_mutex);
8533 	h->scan_finished = 1; /* no scan currently in progress */
8534 	h->scan_waiting = 0;
8535 
8536 	pci_set_drvdata(pdev, h);
8537 	h->ndevices = 0;
8538 
8539 	spin_lock_init(&h->devlock);
8540 	rc = hpsa_put_ctlr_into_performant_mode(h);
8541 	if (rc)
8542 		goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8543 
8544 	/* hook into SCSI subsystem */
8545 	rc = hpsa_scsi_add_host(h);
8546 	if (rc)
8547 		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8548 
8549 	/* create the rescan and resubmit workqueues */
8550 	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8551 	if (!h->rescan_ctlr_wq) {
8552 		rc = -ENOMEM;
8553 		goto clean7;
8554 	}
8555 
8556 	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8557 	if (!h->resubmit_wq) {
8558 		rc = -ENOMEM;
8559 		goto clean7;	/* aer/h */
8560 	}
8561 
8562 	/*
8563 	 * At this point, the controller is ready to take commands.
8564 	 * Now, if reset_devices and the hard reset didn't work, try
8565 	 * the soft reset and see if that works.
8566 	 */
8567 	if (try_soft_reset) {
8568 
8569 		/* This is kind of gross.  We may or may not get a completion
8570 		 * from the soft reset command, and if we do, then the value
8571 		 * from the fifo may or may not be valid.  So, we wait 10 secs
8572 		 * after the reset, throwing away any completions we get during
8573 		 * that time.  Unregister the interrupt handler and register
8574 		 * fake ones to scoop up any residual completions.
8575 		 */
8576 		spin_lock_irqsave(&h->lock, flags);
8577 		h->access.set_intr_mask(h, HPSA_INTR_OFF);
8578 		spin_unlock_irqrestore(&h->lock, flags);
8579 		hpsa_free_irqs(h);
8580 		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8581 					hpsa_intx_discard_completions);
8582 		if (rc) {
8583 			dev_warn(&h->pdev->dev,
8584 				"Failed to request_irq after soft reset.\n");
8585 			/*
8586 			 * cannot goto clean7 or free_irqs will be called
8587 			 * again. Instead, do its work
8588 			 */
8589 			hpsa_free_performant_mode(h);	/* clean7 */
8590 			hpsa_free_sg_chain_blocks(h);	/* clean6 */
8591 			hpsa_free_cmd_pool(h);		/* clean5 */
8592 			/*
8593 			 * skip hpsa_free_irqs(h) clean4 since that
8594 			 * was just called before request_irqs failed
8595 			 */
8596 			goto clean3;
8597 		}
8598 
8599 		rc = hpsa_kdump_soft_reset(h);
8600 		if (rc)
8601 			/* Neither hard nor soft reset worked, we're hosed. */
8602 			goto clean7;
8603 
8604 		dev_info(&h->pdev->dev, "Board READY.\n");
8605 		dev_info(&h->pdev->dev,
8606 			"Waiting for stale completions to drain.\n");
8607 		h->access.set_intr_mask(h, HPSA_INTR_ON);
8608 		msleep(10000);
8609 		h->access.set_intr_mask(h, HPSA_INTR_OFF);
8610 
8611 		rc = controller_reset_failed(h->cfgtable);
8612 		if (rc)
8613 			dev_info(&h->pdev->dev,
8614 				"Soft reset appears to have failed.\n");
8615 
8616 		/* since the controller's reset, we have to go back and re-init
8617 		 * everything.  Easiest to just forget what we've done and do it
8618 		 * all over again.
8619 		 */
8620 		hpsa_undo_allocations_after_kdump_soft_reset(h);
8621 		try_soft_reset = 0;
8622 		if (rc)
8623 			/* don't goto clean, we already unallocated */
8624 			return -ENODEV;
8625 
8626 		goto reinit_after_soft_reset;
8627 	}
8628 
8629 	/* Enable Accelerated IO path at driver layer */
8630 	h->acciopath_status = 1;
8631 	/* Disable discovery polling.*/
8632 	h->discovery_polling = 0;
8633 
8634 
8635 	/* Turn the interrupts on so we can service requests */
8636 	h->access.set_intr_mask(h, HPSA_INTR_ON);
8637 
8638 	hpsa_hba_inquiry(h);
8639 
8640 	h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8641 	if (!h->lastlogicals)
8642 		dev_info(&h->pdev->dev,
8643 			"Can't track change to report lun data\n");
8644 
8645 	/* Monitor the controller for firmware lockups */
8646 	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8647 	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8648 	schedule_delayed_work(&h->monitor_ctlr_work,
8649 				h->heartbeat_sample_interval);
8650 	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8651 	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8652 				h->heartbeat_sample_interval);
8653 	return 0;
8654 
8655 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8656 	hpsa_free_performant_mode(h);
8657 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
8658 clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
8659 	hpsa_free_sg_chain_blocks(h);
8660 clean5: /* cmd, irq, shost, pci, lu, aer/h */
8661 	hpsa_free_cmd_pool(h);
8662 clean4: /* irq, shost, pci, lu, aer/h */
8663 	hpsa_free_irqs(h);
8664 clean3: /* shost, pci, lu, aer/h */
8665 	scsi_host_put(h->scsi_host);
8666 	h->scsi_host = NULL;
8667 clean2_5: /* pci, lu, aer/h */
8668 	hpsa_free_pci_init(h);
8669 clean2: /* lu, aer/h */
8670 	if (h->lockup_detected) {
8671 		free_percpu(h->lockup_detected);
8672 		h->lockup_detected = NULL;
8673 	}
8674 clean1:	/* wq/aer/h */
8675 	if (h->resubmit_wq) {
8676 		destroy_workqueue(h->resubmit_wq);
8677 		h->resubmit_wq = NULL;
8678 	}
8679 	if (h->rescan_ctlr_wq) {
8680 		destroy_workqueue(h->rescan_ctlr_wq);
8681 		h->rescan_ctlr_wq = NULL;
8682 	}
8683 	kfree(h);
8684 	return rc;
8685 }
8686 
8687 static void hpsa_flush_cache(struct ctlr_info *h)
8688 {
8689 	char *flush_buf;
8690 	struct CommandList *c;
8691 	int rc;
8692 
8693 	if (unlikely(lockup_detected(h)))
8694 		return;
8695 	flush_buf = kzalloc(4, GFP_KERNEL);
8696 	if (!flush_buf)
8697 		return;
8698 
8699 	c = cmd_alloc(h);
8700 
8701 	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8702 		RAID_CTLR_LUNID, TYPE_CMD)) {
8703 		goto out;
8704 	}
8705 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8706 					PCI_DMA_TODEVICE, NO_TIMEOUT);
8707 	if (rc)
8708 		goto out;
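	/* Note: the "out" label sits between the if () and its statement, so
	 * a goto prints the warning unconditionally, while falling through
	 * prints it only when CommandStatus is non-zero.
	 */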
8709 	if (c->err_info->CommandStatus != 0)
8710 out:
8711 		dev_warn(&h->pdev->dev,
8712 			"error flushing cache on controller\n");
8713 	cmd_free(h, c);
8714 	kfree(flush_buf);
8715 }
8716 
8717 /* Make the controller gather fresh report-lun data each time we
8718  * send down a report-luns request.
8719  */
8720 static void hpsa_disable_rld_caching(struct ctlr_info *h)
8721 {
8722 	u32 *options;
8723 	struct CommandList *c;
8724 	int rc;
8725 
8726 	/* Don't bother trying to set diag options if locked up */
8727 	if (unlikely(h->lockup_detected))
8728 		return;
8729 
8730 	options = kzalloc(sizeof(*options), GFP_KERNEL);
8731 	if (!options) {
8732 		dev_err(&h->pdev->dev,
8733 			"Error: failed to disable rld caching, during alloc.\n");
8734 		return;
8735 	}
8736 
8737 	c = cmd_alloc(h);
8738 
8739 	/* first, get the current diag options settings */
8740 	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8741 		RAID_CTLR_LUNID, TYPE_CMD))
8742 		goto errout;
8743 
8744 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8745 		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
8746 	if ((rc != 0) || (c->err_info->CommandStatus != 0))
8747 		goto errout;
8748 
8749 	/* Now, set the bit for disabling the RLD caching */
8750 	*options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
8751 
8752 	if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
8753 		RAID_CTLR_LUNID, TYPE_CMD))
8754 		goto errout;
8755 
8756 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8757 		PCI_DMA_TODEVICE, NO_TIMEOUT);
8758 	if ((rc != 0)  || (c->err_info->CommandStatus != 0))
8759 		goto errout;
8760 
8761 	/* Now verify that it got set: */
8762 	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
8763 		RAID_CTLR_LUNID, TYPE_CMD))
8764 		goto errout;
8765 
8766 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8767 		PCI_DMA_FROMDEVICE, NO_TIMEOUT);
8768 	if ((rc != 0)  || (c->err_info->CommandStatus != 0))
8769 		goto errout;
8770 
8771 	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
8772 		goto out;
8773 
8774 errout:
8775 	dev_err(&h->pdev->dev,
8776 			"Error: failed to disable report lun data caching.\n");
8777 out:
8778 	cmd_free(h, c);
8779 	kfree(options);
8780 }
8781 
8782 static void hpsa_shutdown(struct pci_dev *pdev)
8783 {
8784 	struct ctlr_info *h;
8785 
8786 	h = pci_get_drvdata(pdev);
8787 	/* Turn board interrupts off and send the flush-cache command
8788 	 * so that all data in the battery-backed cache is written out
8789 	 * to the disks before the controller goes down.
8790 	 */
8791 	hpsa_flush_cache(h);
8792 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
8793 	hpsa_free_irqs(h);			/* init_one 4 */
8794 	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
8795 }
8796 
8797 static void hpsa_free_device_info(struct ctlr_info *h)
8798 {
8799 	int i;
8800 
8801 	for (i = 0; i < h->ndevices; i++) {
8802 		kfree(h->dev[i]);
8803 		h->dev[i] = NULL;
8804 	}
8805 }
8806 
8807 static void hpsa_remove_one(struct pci_dev *pdev)
8808 {
8809 	struct ctlr_info *h;
8810 	unsigned long flags;
8811 
8812 	if (pci_get_drvdata(pdev) == NULL) {
8813 		dev_err(&pdev->dev, "unable to remove device\n");
8814 		return;
8815 	}
8816 	h = pci_get_drvdata(pdev);
8817 
8818 	/* Get rid of any controller monitoring work items */
8819 	spin_lock_irqsave(&h->lock, flags);
8820 	h->remove_in_progress = 1;
8821 	spin_unlock_irqrestore(&h->lock, flags);
8822 	cancel_delayed_work_sync(&h->monitor_ctlr_work);
8823 	cancel_delayed_work_sync(&h->rescan_ctlr_work);
8824 	destroy_workqueue(h->rescan_ctlr_wq);
8825 	destroy_workqueue(h->resubmit_wq);
8826 
8827 	hpsa_delete_sas_host(h);
8828 
8829 	/*
8830 	 * Call before disabling interrupts.
8831 	 * scsi_remove_host can trigger I/O operations especially
8832 	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
8833 	 * operations which cannot complete and will hang the system.
8834 	 */
8835 	if (h->scsi_host)
8836 		scsi_remove_host(h->scsi_host);		/* init_one 8 */
8837 	/* includes hpsa_free_irqs - init_one 4 */
8838 	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
8839 	hpsa_shutdown(pdev);
8840 
8841 	hpsa_free_device_info(h);		/* scan */
8842 
8843 	kfree(h->hba_inquiry_data);			/* init_one 10 */
8844 	h->hba_inquiry_data = NULL;			/* init_one 10 */
8845 	hpsa_free_ioaccel2_sg_chain_blocks(h);
8846 	hpsa_free_performant_mode(h);			/* init_one 7 */
8847 	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
8848 	hpsa_free_cmd_pool(h);				/* init_one 5 */
8849 	kfree(h->lastlogicals);
8850 
8851 	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
8852 
8853 	scsi_host_put(h->scsi_host);			/* init_one 3 */
8854 	h->scsi_host = NULL;				/* init_one 3 */
8855 
8856 	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
8857 	hpsa_free_pci_init(h);				/* init_one 2.5 */
8858 
8859 	free_percpu(h->lockup_detected);		/* init_one 2 */
8860 	h->lockup_detected = NULL;			/* init_one 2 */
8861 	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */
8862 
8863 	kfree(h);					/* init_one 1 */
8864 }
8865 
8866 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8867 	__attribute__((unused)) pm_message_t state)
8868 {
8869 	return -ENOSYS;
8870 }
8871 
8872 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8873 {
8874 	return -ENOSYS;
8875 }
8876 
8877 static struct pci_driver hpsa_pci_driver = {
8878 	.name = HPSA,
8879 	.probe = hpsa_init_one,
8880 	.remove = hpsa_remove_one,
8881 	.id_table = hpsa_pci_device_id,	/* id_table */
8882 	.shutdown = hpsa_shutdown,
8883 	.suspend = hpsa_suspend,
8884 	.resume = hpsa_resume,
8885 };
8886 
8887 /* Fill in bucket_map[], given nsgs (the max number of
8888  * scatter gather elements supported) and bucket[],
8889  * which is an array of 8 integers.  The bucket[] array
8890  * contains 8 different DMA transfer sizes (in 16
8891  * byte increments) which the controller uses to fetch
8892  * commands.  This function fills in bucket_map[], which
8893  * maps a given number of scatter gather elements to one of
8894  * the 8 DMA transfer sizes.  The point of it is to allow the
8895  * controller to only do as much DMA as needed to fetch the
8896  * command, with the DMA transfer size encoded in the lower
8897  * bits of the command address.
8898  */
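/*
 * For example, with the bft[] table used by hpsa_enter_performant_mode
 * (5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4) and min_blocks = 4,
 * a command with 3 SG entries needs 3 + 4 = 7 sixteen-byte blocks, so
 * bucket_map[3] = 2 (bucket[2] = 8 is the first bucket >= 7).
 */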
8899 static void calc_bucket_map(int bucket[], int num_buckets,
8900 	int nsgs, int min_blocks, u32 *bucket_map)
8901 {
8902 	int i, j, b, size;
8903 
8904 	/* Note, bucket_map must have nsgs+1 entries. */
8905 	for (i = 0; i <= nsgs; i++) {
8906 		/* Compute size of a command with i SG entries */
8907 		size = i + min_blocks;
8908 		b = num_buckets; /* Assume the biggest bucket */
8909 		/* Find the bucket that is just big enough */
8910 		for (j = 0; j < num_buckets; j++) {
8911 			if (bucket[j] >= size) {
8912 				b = j;
8913 				break;
8914 			}
8915 		}
8916 		/* for a command with i SG entries, use bucket b. */
8917 		bucket_map[i] = b;
8918 	}
8919 }
8920 
8921 /*
8922  * return -ENODEV on err, 0 on success (or no action)
8923  * allocates numerous items that must be freed later
8924  */
8925 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
8926 {
8927 	int i;
8928 	unsigned long register_value;
8929 	unsigned long transMethod = CFGTBL_Trans_Performant |
8930 			(trans_support & CFGTBL_Trans_use_short_tags) |
8931 				CFGTBL_Trans_enable_directed_msix |
8932 			(trans_support & (CFGTBL_Trans_io_accel1 |
8933 				CFGTBL_Trans_io_accel2));
8934 	struct access_method access = SA5_performant_access;
8935 
8936 	/* This is a bit complicated.  There are 8 registers on
8937 	 * the controller which we write to in order to tell it the 8
8938 	 * possible command sizes.  It's a way of reducing the DMA
8939 	 * done to fetch each command.  Encoded into each command's tag
8940 	 * are 3 bits which tell the controller which of the eight
8941 	 * sizes that command fits within.  The size of each command
8942 	 * depends on how many scatter-gather entries it has; each SG
8943 	 * entry requires 16 bytes.  The eight registers are programmed
8944 	 * with the number of 16-byte blocks a command of that size requires.
8945 	 * The smallest possible command requires 5 such 16-byte blocks;
8946 	 * the largest requires SG_ENTRIES_IN_CMD + 4 16-byte blocks.
8947 	 * Note, this only covers the SG entries contained within the
8948 	 * command block, and does not extend to chained blocks of SG
8949 	 * elements.  bft[] contains the eight values we write to the
8950 	 * registers.  They are not evenly distributed, but have more
8951 	 * sizes for small commands, and fewer sizes for larger commands.
8952 	 */
8953 	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
8954 #define MIN_IOACCEL2_BFT_ENTRY 5
8955 #define HPSA_IOACCEL2_HEADER_SZ 4
8956 	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
8957 			13, 14, 15, 16, 17, 18, 19,
8958 			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
8959 	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
8960 	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
8961 	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
8962 				 16 * MIN_IOACCEL2_BFT_ENTRY);
8963 	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
8964 	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
8965 	/*  5 = 1 s/g entry or 4k
8966 	 *  6 = 2 s/g entry or 8k
8967 	 *  8 = 4 s/g entry or 16k
8968 	 * 10 = 6 s/g entry or 24k
8969 	 */
8970 
8971 	/* If the controller supports either ioaccel method then
8972 	 * we can also use the RAID stack submit path that does not
8973 	 * perform the superfluous readl() after each command submission.
8974 	 */
8975 	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
8976 		access = SA5_performant_access_no_read;
8977 
8978 	/* Controller spec: zero out this buffer. */
8979 	for (i = 0; i < h->nreply_queues; i++)
8980 		memset(h->reply_queue[i].head, 0, h->reply_queue_size);
8981 
8982 	bft[7] = SG_ENTRIES_IN_CMD + 4;
8983 	calc_bucket_map(bft, ARRAY_SIZE(bft),
8984 				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
8985 	for (i = 0; i < 8; i++)
8986 		writel(bft[i], &h->transtable->BlockFetch[i]);
8987 
8988 	/* size of controller ring buffer */
8989 	writel(h->max_commands, &h->transtable->RepQSize);
8990 	writel(h->nreply_queues, &h->transtable->RepQCount);
8991 	writel(0, &h->transtable->RepQCtrAddrLow32);
8992 	writel(0, &h->transtable->RepQCtrAddrHigh32);
8993 
8994 	for (i = 0; i < h->nreply_queues; i++) {
8995 		writel(0, &h->transtable->RepQAddr[i].upper);
8996 		writel(h->reply_queue[i].busaddr,
8997 			&h->transtable->RepQAddr[i].lower);
8998 	}
8999 
9000 	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9001 	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9002 	/*
9003 	 * enable outbound interrupt coalescing in accelerator mode;
9004 	 */
9005 	if (trans_support & CFGTBL_Trans_io_accel1) {
9006 		access = SA5_ioaccel_mode1_access;
9007 		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9008 		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9009 	} else {
9010 		if (trans_support & CFGTBL_Trans_io_accel2) {
9011 			access = SA5_ioaccel_mode2_access;
9012 			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9013 			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9014 		}
9015 	}
9016 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9017 	if (hpsa_wait_for_mode_change_ack(h)) {
9018 		dev_err(&h->pdev->dev,
9019 			"performant mode problem - doorbell timeout\n");
9020 		return -ENODEV;
9021 	}
9022 	register_value = readl(&(h->cfgtable->TransportActive));
9023 	if (!(register_value & CFGTBL_Trans_Performant)) {
9024 		dev_err(&h->pdev->dev,
9025 			"performant mode problem - transport not active\n");
9026 		return -ENODEV;
9027 	}
9028 	/* Change the access methods to the performant access methods */
9029 	h->access = access;
9030 	h->transMethod = transMethod;
9031 
9032 	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9033 		(trans_support & CFGTBL_Trans_io_accel2)))
9034 		return 0;
9035 
9036 	if (trans_support & CFGTBL_Trans_io_accel1) {
9037 		/* Set up I/O accelerator mode */
9038 		for (i = 0; i < h->nreply_queues; i++) {
9039 			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9040 			h->reply_queue[i].current_entry =
9041 				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9042 		}
9043 		bft[7] = h->ioaccel_maxsg + 8;
9044 		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9045 				h->ioaccel1_blockFetchTable);
9046 
9047 		/* initialize all reply queue entries to unused */
9048 		for (i = 0; i < h->nreply_queues; i++)
9049 			memset(h->reply_queue[i].head,
9050 				(u8) IOACCEL_MODE1_REPLY_UNUSED,
9051 				h->reply_queue_size);
9052 
9053 		/* set all the constant fields in the accelerator command
9054 		 * frames once at init time to save CPU cycles later.
9055 		 */
9056 		for (i = 0; i < h->nr_cmds; i++) {
9057 			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9058 
9059 			cp->function = IOACCEL1_FUNCTION_SCSIIO;
9060 			cp->err_info = (u32) (h->errinfo_pool_dhandle +
9061 					(i * sizeof(struct ErrorInfo)));
9062 			cp->err_info_len = sizeof(struct ErrorInfo);
9063 			cp->sgl_offset = IOACCEL1_SGLOFFSET;
9064 			cp->host_context_flags =
9065 				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9066 			cp->timeout_sec = 0;
9067 			cp->ReplyQueue = 0;
9068 			cp->tag =
9069 				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9070 			cp->host_addr =
9071 				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9072 					(i * sizeof(struct io_accel1_cmd)));
9073 		}
9074 	} else if (trans_support & CFGTBL_Trans_io_accel2) {
9075 		u64 cfg_offset, cfg_base_addr_index;
9076 		u32 bft2_offset, cfg_base_addr;
9077 		int rc;
9078 
9079 		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9080 			&cfg_base_addr_index, &cfg_offset);
9081 		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9082 		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9083 		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9084 				4, h->ioaccel2_blockFetchTable);
9085 		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9086 		BUILD_BUG_ON(offsetof(struct CfgTable,
9087 				io_accel_request_size_offset) != 0xb8);
9088 		h->ioaccel2_bft2_regs =
9089 			remap_pci_mem(pci_resource_start(h->pdev,
9090 					cfg_base_addr_index) +
9091 					cfg_offset + bft2_offset,
9092 					ARRAY_SIZE(bft2) *
9093 					sizeof(*h->ioaccel2_bft2_regs));
9094 		for (i = 0; i < ARRAY_SIZE(bft2); i++)
9095 			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9096 	}
9097 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9098 	if (hpsa_wait_for_mode_change_ack(h)) {
9099 		dev_err(&h->pdev->dev,
9100 			"performant mode problem - enabling ioaccel mode\n");
9101 		return -ENODEV;
9102 	}
9103 	return 0;
9104 }
9105 
9106 /* Free ioaccel1 mode command blocks and block fetch table */
9107 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9108 {
9109 	if (h->ioaccel_cmd_pool) {
9110 		pci_free_consistent(h->pdev,
9111 			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9112 			h->ioaccel_cmd_pool,
9113 			h->ioaccel_cmd_pool_dhandle);
9114 		h->ioaccel_cmd_pool = NULL;
9115 		h->ioaccel_cmd_pool_dhandle = 0;
9116 	}
9117 	kfree(h->ioaccel1_blockFetchTable);
9118 	h->ioaccel1_blockFetchTable = NULL;
9119 }
9120 
9121 /* Allocate ioaccel1 mode command blocks and block fetch table */
9122 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9123 {
9124 	h->ioaccel_maxsg =
9125 		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9126 	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9127 		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9128 
9129 	/* Command structures must be aligned on a 128-byte boundary
9130 	 * because the 7 lower bits of the address are used by the
9131 	 * hardware.
9132 	 */
9133 	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9134 			IOACCEL1_COMMANDLIST_ALIGNMENT);
9135 	h->ioaccel_cmd_pool =
9136 		pci_alloc_consistent(h->pdev,
9137 			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9138 			&(h->ioaccel_cmd_pool_dhandle));
9139 
9140 	h->ioaccel1_blockFetchTable =
9141 		kmalloc(((h->ioaccel_maxsg + 1) *
9142 				sizeof(u32)), GFP_KERNEL);
9143 
9144 	if ((h->ioaccel_cmd_pool == NULL) ||
9145 		(h->ioaccel1_blockFetchTable == NULL))
9146 		goto clean_up;
9147 
9148 	memset(h->ioaccel_cmd_pool, 0,
9149 		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9150 	return 0;
9151 
9152 clean_up:
9153 	hpsa_free_ioaccel1_cmd_and_bft(h);
9154 	return -ENOMEM;
9155 }
9156 
9157 /* Free ioaccel2 mode command blocks and block fetch table */
9158 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9159 {
9160 	hpsa_free_ioaccel2_sg_chain_blocks(h);
9161 
9162 	if (h->ioaccel2_cmd_pool) {
9163 		pci_free_consistent(h->pdev,
9164 			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9165 			h->ioaccel2_cmd_pool,
9166 			h->ioaccel2_cmd_pool_dhandle);
9167 		h->ioaccel2_cmd_pool = NULL;
9168 		h->ioaccel2_cmd_pool_dhandle = 0;
9169 	}
9170 	kfree(h->ioaccel2_blockFetchTable);
9171 	h->ioaccel2_blockFetchTable = NULL;
9172 }
9173 
9174 /* Allocate ioaccel2 mode command blocks and block fetch table */
9175 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9176 {
9177 	int rc;
9178 
9179 	/* Allocate ioaccel2 mode command blocks and block fetch table */
9180 
9181 	h->ioaccel_maxsg =
9182 		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9183 	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9184 		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9185 
9186 	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9187 			IOACCEL2_COMMANDLIST_ALIGNMENT);
9188 	h->ioaccel2_cmd_pool =
9189 		pci_alloc_consistent(h->pdev,
9190 			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9191 			&(h->ioaccel2_cmd_pool_dhandle));
9192 
9193 	h->ioaccel2_blockFetchTable =
9194 		kmalloc(((h->ioaccel_maxsg + 1) *
9195 				sizeof(u32)), GFP_KERNEL);
9196 
9197 	if ((h->ioaccel2_cmd_pool == NULL) ||
9198 		(h->ioaccel2_blockFetchTable == NULL)) {
9199 		rc = -ENOMEM;
9200 		goto clean_up;
9201 	}
9202 
9203 	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9204 	if (rc)
9205 		goto clean_up;
9206 
9207 	memset(h->ioaccel2_cmd_pool, 0,
9208 		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9209 	return 0;
9210 
9211 clean_up:
9212 	hpsa_free_ioaccel2_cmd_and_bft(h);
9213 	return rc;
9214 }
9215 
9216 /* Free items allocated by hpsa_put_ctlr_into_performant_mode */
9217 static void hpsa_free_performant_mode(struct ctlr_info *h)
9218 {
9219 	kfree(h->blockFetchTable);
9220 	h->blockFetchTable = NULL;
9221 	hpsa_free_reply_queues(h);
9222 	hpsa_free_ioaccel1_cmd_and_bft(h);
9223 	hpsa_free_ioaccel2_cmd_and_bft(h);
9224 }
9225 
9226 /* return -ENODEV on error, 0 on success (or no action)
9227  * allocates numerous items that must be freed later
9228  */
9229 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9230 {
9231 	u32 trans_support;
9232 	unsigned long transMethod = CFGTBL_Trans_Performant |
9233 					CFGTBL_Trans_use_short_tags;
9234 	int i, rc;
9235 
9236 	if (hpsa_simple_mode)
9237 		return 0;
9238 
9239 	trans_support = readl(&(h->cfgtable->TransportSupport));
9240 	if (!(trans_support & PERFORMANT_MODE))
9241 		return 0;
9242 
9243 	/* Check for I/O accelerator mode support */
9244 	if (trans_support & CFGTBL_Trans_io_accel1) {
9245 		transMethod |= CFGTBL_Trans_io_accel1 |
9246 				CFGTBL_Trans_enable_directed_msix;
9247 		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9248 		if (rc)
9249 			return rc;
9250 	} else if (trans_support & CFGTBL_Trans_io_accel2) {
9251 		transMethod |= CFGTBL_Trans_io_accel2 |
9252 				CFGTBL_Trans_enable_directed_msix;
9253 		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9254 		if (rc)
9255 			return rc;
9256 	}
9257 
9258 	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
9259 	hpsa_get_max_perf_mode_cmds(h);
9260 	/* Performant mode ring buffer and supporting data structures */
9261 	h->reply_queue_size = h->max_commands * sizeof(u64);
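	/* Each reply queue entry is an 8-byte completion tag, hence
	 * sizeof(u64) per outstanding command.
	 */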
9262 
9263 	for (i = 0; i < h->nreply_queues; i++) {
9264 		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
9265 						h->reply_queue_size,
9266 						&(h->reply_queue[i].busaddr));
9267 		if (!h->reply_queue[i].head) {
9268 			rc = -ENOMEM;
9269 			goto clean1;	/* rq, ioaccel */
9270 		}
9271 		h->reply_queue[i].size = h->max_commands;
9272 		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
9273 		h->reply_queue[i].current_entry = 0;
9274 	}
9275 
9276 	/* Need a block fetch table for performant mode */
9277 	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9278 				sizeof(u32)), GFP_KERNEL);
9279 	if (!h->blockFetchTable) {
9280 		rc = -ENOMEM;
9281 		goto clean1;	/* rq, ioaccel */
9282 	}
9283 
9284 	rc = hpsa_enter_performant_mode(h, trans_support);
9285 	if (rc)
9286 		goto clean2;	/* bft, rq, ioaccel */
9287 	return 0;
9288 
9289 clean2:	/* bft, rq, ioaccel */
9290 	kfree(h->blockFetchTable);
9291 	h->blockFetchTable = NULL;
9292 clean1:	/* rq, ioaccel */
9293 	hpsa_free_reply_queues(h);
9294 	hpsa_free_ioaccel1_cmd_and_bft(h);
9295 	hpsa_free_ioaccel2_cmd_and_bft(h);
9296 	return rc;
9297 }
9298 
9299 static int is_accelerated_cmd(struct CommandList *c)
9300 {
9301 	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9302 }
9303 
9304 static void hpsa_drain_accel_commands(struct ctlr_info *h)
9305 {
9306 	struct CommandList *c = NULL;
9307 	int i, accel_cmds_out;
9308 	int refcount;
9309 
9310 	do { /* wait for all outstanding ioaccel commands to drain out */
9311 		accel_cmds_out = 0;
9312 		for (i = 0; i < h->nr_cmds; i++) {
9313 			c = h->cmd_pool + i;
9314 			refcount = atomic_inc_return(&c->refcount);
9315 			if (refcount > 1) /* Command is allocated */
9316 				accel_cmds_out += is_accelerated_cmd(c);
9317 			cmd_free(h, c);
9318 		}
9319 		if (accel_cmds_out <= 0)
9320 			break;
9321 		msleep(100);
9322 	} while (1);
9323 }
9324 
9325 static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9326 				struct hpsa_sas_port *hpsa_sas_port)
9327 {
9328 	struct hpsa_sas_phy *hpsa_sas_phy;
9329 	struct sas_phy *phy;
9330 
9331 	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9332 	if (!hpsa_sas_phy)
9333 		return NULL;
9334 
9335 	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9336 		hpsa_sas_port->next_phy_index);
9337 	if (!phy) {
9338 		kfree(hpsa_sas_phy);
9339 		return NULL;
9340 	}
9341 
9342 	hpsa_sas_port->next_phy_index++;
9343 	hpsa_sas_phy->phy = phy;
9344 	hpsa_sas_phy->parent_port = hpsa_sas_port;
9345 
9346 	return hpsa_sas_phy;
9347 }
9348 
9349 static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9350 {
9351 	struct sas_phy *phy = hpsa_sas_phy->phy;
9352 
9353 	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9354 	if (hpsa_sas_phy->added_to_port)
9355 		list_del(&hpsa_sas_phy->phy_list_entry);
9356 	sas_phy_delete(phy);
9357 	kfree(hpsa_sas_phy);
9358 }
9359 
9360 static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9361 {
9362 	int rc;
9363 	struct hpsa_sas_port *hpsa_sas_port;
9364 	struct sas_phy *phy;
9365 	struct sas_identify *identify;
9366 
9367 	hpsa_sas_port = hpsa_sas_phy->parent_port;
9368 	phy = hpsa_sas_phy->phy;
9369 
9370 	identify = &phy->identify;
9371 	memset(identify, 0, sizeof(*identify));
9372 	identify->sas_address = hpsa_sas_port->sas_address;
9373 	identify->device_type = SAS_END_DEVICE;
9374 	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9375 	identify->target_port_protocols = SAS_PROTOCOL_STP;
9376 	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9377 	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9378 	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9379 	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9380 	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9381 
9382 	rc = sas_phy_add(hpsa_sas_phy->phy);
9383 	if (rc)
9384 		return rc;
9385 
9386 	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9387 	list_add_tail(&hpsa_sas_phy->phy_list_entry,
9388 			&hpsa_sas_port->phy_list_head);
9389 	hpsa_sas_phy->added_to_port = true;
9390 
9391 	return 0;
9392 }
9393 
9394 static int
9395 	hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9396 				struct sas_rphy *rphy)
9397 {
9398 	struct sas_identify *identify;
9399 
9400 	identify = &rphy->identify;
9401 	identify->sas_address = hpsa_sas_port->sas_address;
9402 	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9403 	identify->target_port_protocols = SAS_PROTOCOL_STP;
9404 
9405 	return sas_rphy_add(rphy);
9406 }
9407 
9408 static struct hpsa_sas_port
9409 	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9410 				u64 sas_address)
9411 {
9412 	int rc;
9413 	struct hpsa_sas_port *hpsa_sas_port;
9414 	struct sas_port *port;
9415 
9416 	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9417 	if (!hpsa_sas_port)
9418 		return NULL;
9419 
9420 	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9421 	hpsa_sas_port->parent_node = hpsa_sas_node;
9422 
9423 	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9424 	if (!port)
9425 		goto free_hpsa_port;
9426 
9427 	rc = sas_port_add(port);
9428 	if (rc)
9429 		goto free_sas_port;
9430 
9431 	hpsa_sas_port->port = port;
9432 	hpsa_sas_port->sas_address = sas_address;
9433 	list_add_tail(&hpsa_sas_port->port_list_entry,
9434 			&hpsa_sas_node->port_list_head);
9435 
9436 	return hpsa_sas_port;
9437 
9438 free_sas_port:
9439 	sas_port_free(port);
9440 free_hpsa_port:
9441 	kfree(hpsa_sas_port);
9442 
9443 	return NULL;
9444 }
9445 
9446 static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9447 {
9448 	struct hpsa_sas_phy *hpsa_sas_phy;
9449 	struct hpsa_sas_phy *next;
9450 
9451 	list_for_each_entry_safe(hpsa_sas_phy, next,
9452 			&hpsa_sas_port->phy_list_head, phy_list_entry)
9453 		hpsa_free_sas_phy(hpsa_sas_phy);
9454 
9455 	sas_port_delete(hpsa_sas_port->port);
9456 	list_del(&hpsa_sas_port->port_list_entry);
9457 	kfree(hpsa_sas_port);
9458 }
9459 
9460 static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9461 {
9462 	struct hpsa_sas_node *hpsa_sas_node;
9463 
9464 	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9465 	if (hpsa_sas_node) {
9466 		hpsa_sas_node->parent_dev = parent_dev;
9467 		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9468 	}
9469 
9470 	return hpsa_sas_node;
9471 }
9472 
9473 static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9474 {
9475 	struct hpsa_sas_port *hpsa_sas_port;
9476 	struct hpsa_sas_port *next;
9477 
9478 	if (!hpsa_sas_node)
9479 		return;
9480 
9481 	list_for_each_entry_safe(hpsa_sas_port, next,
9482 			&hpsa_sas_node->port_list_head, port_list_entry)
9483 		hpsa_free_sas_port(hpsa_sas_port);
9484 
9485 	kfree(hpsa_sas_node);
9486 }
9487 
9488 static struct hpsa_scsi_dev_t
9489 	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9490 					struct sas_rphy *rphy)
9491 {
9492 	int i;
9493 	struct hpsa_scsi_dev_t *device;
9494 
9495 	for (i = 0; i < h->ndevices; i++) {
9496 		device = h->dev[i];
9497 		if (!device->sas_port)
9498 			continue;
9499 		if (device->sas_port->rphy == rphy)
9500 			return device;
9501 	}
9502 
9503 	return NULL;
9504 }
9505 
9506 static int hpsa_add_sas_host(struct ctlr_info *h)
9507 {
9508 	int rc;
9509 	struct device *parent_dev;
9510 	struct hpsa_sas_node *hpsa_sas_node;
9511 	struct hpsa_sas_port *hpsa_sas_port;
9512 	struct hpsa_sas_phy *hpsa_sas_phy;
9513 
9514 	parent_dev = &h->scsi_host->shost_gendev;
9515 
9516 	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9517 	if (!hpsa_sas_node)
9518 		return -ENOMEM;
9519 
9520 	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9521 	if (!hpsa_sas_port) {
9522 		rc = -ENODEV;
9523 		goto free_sas_node;
9524 	}
9525 
9526 	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9527 	if (!hpsa_sas_phy) {
9528 		rc = -ENODEV;
9529 		goto free_sas_port;
9530 	}
9531 
9532 	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9533 	if (rc)
9534 		goto free_sas_phy;
9535 
9536 	h->sas_host = hpsa_sas_node;
9537 
9538 	return 0;
9539 
9540 free_sas_phy:
9541 	hpsa_free_sas_phy(hpsa_sas_phy);
9542 free_sas_port:
9543 	hpsa_free_sas_port(hpsa_sas_port);
9544 free_sas_node:
9545 	hpsa_free_sas_node(hpsa_sas_node);
9546 
9547 	return rc;
9548 }
9549 
9550 static void hpsa_delete_sas_host(struct ctlr_info *h)
9551 {
9552 	hpsa_free_sas_node(h->sas_host);
9553 }
9554 
9555 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9556 				struct hpsa_scsi_dev_t *device)
9557 {
9558 	int rc;
9559 	struct hpsa_sas_port *hpsa_sas_port;
9560 	struct sas_rphy *rphy;
9561 
9562 	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9563 	if (!hpsa_sas_port)
9564 		return -ENOMEM;
9565 
9566 	rphy = sas_end_device_alloc(hpsa_sas_port->port);
9567 	if (!rphy) {
9568 		rc = -ENODEV;
9569 		goto free_sas_port;
9570 	}
9571 
9572 	hpsa_sas_port->rphy = rphy;
9573 	device->sas_port = hpsa_sas_port;
9574 
9575 	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9576 	if (rc)
9577 		goto free_sas_port;
9578 
9579 	return 0;
9580 
9581 free_sas_port:
9582 	hpsa_free_sas_port(hpsa_sas_port);
9583 	device->sas_port = NULL;
9584 
9585 	return rc;
9586 }
9587 
9588 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9589 {
9590 	if (device->sas_port) {
9591 		hpsa_free_sas_port(device->sas_port);
9592 		device->sas_port = NULL;
9593 	}
9594 }
9595 
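/*
 * Minimal SAS transport callbacks.  hpsa does not report link errors,
 * enclosure/bay identifiers, phy control, or SMP passthrough, so these
 * handlers simply return success or a benign error code.
 */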
static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	return 0;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}

/* SMP = Serial Management Protocol */
static int
hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
			struct request *req)
{
	return -EINVAL;
}

static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
	.smp_handler = hpsa_sas_smp_handler,
};

/*
 *  This is it.  Register the PCI driver information for the cards we
 *  control; the OS will call our registered routines when it finds one
 *  of our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);

	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}

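/*
 * Compile-time layout checks.  verify_offsets() is never called; each
 * BUILD_BUG_ON() below fails the build if a member of the command or
 * RAID-map structures shared with the controller firmware drifts from
 * its expected offset.
 */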
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3);  */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4  */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);