/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2016 Microsemi Corporation
 * Copyright 2014-2015 PMC-Sierra, Inc.
 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK 0
#define IO_ERROR 1

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
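
/*
 * The driver fills ctlr_info with one of the SA5_*_access tables defined at
 * the bottom of this file and dispatches through it, keeping per-board
 * register quirks out of the common paths.  A minimal sketch of that
 * dispatch (illustrative only; hpsa_example_fire is not part of the driver):
 */
#if 0
static void hpsa_example_fire(struct ctlr_info *h, struct CommandList *c)
{
	h->access.submit_command(h, c);	/* board-specific doorbell write */
	while (!h->access.intr_pending(h))
		cpu_relax();		/* board-specific "done?" check */
	(void) h->access.command_completed(h, 0);	/* reply queue 0 */
}
#endif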

/* for SAS hosts and SAS expanders */
struct hpsa_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;
};

struct hpsa_sas_port {
	struct list_head port_list_entry;
	u64 sas_address;
	struct sas_port *port;
	int next_phy_index;
	struct list_head phy_list_head;
	struct hpsa_sas_node *parent_node;
	struct sas_rphy *rphy;
};

struct hpsa_sas_phy {
	struct list_head phy_list_entry;
	struct sas_phy *phy;
	struct hpsa_sas_port *parent_port;
	bool added_to_port;
};

#define EXTERNAL_QD 7
struct hpsa_scsi_dev_t {
	unsigned int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
	u8 expose_device;
	u8 removed : 1;			/* device is marked for death */
	u8 was_removed : 1;		/* device actually removed */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	u64 sas_address;
	u64 eli;			/* from report diags. */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char rev;		/* byte 2 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t commands_outstanding;	/* track commands sent to device */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices;
					 * counts commands sent to the physical
					 * device via the "ioaccel" path.
					 */
	bool in_reset;
	u32 ioaccel_handle;
	u8 active_path_index;
	u8 path_map;
	u8 bay;
	u8 box[8];
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive.
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the physical drives
	 * that make up those logical drives.  Note that multiple logical
	 * drives may share physical drives: you can, for instance, have 5
	 * physical drives with 3 logical drives each using those same 5
	 * disks.  We need these pointers to count I/Os out to physical
	 * devices in order to honor physical device queue depth limits
	 * (see the sketch following this struct).
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	int nphysical_disks;
	int supports_aborts;
	struct hpsa_sas_port *sas_port;
	int external;	/* 1-from external array 0-not <0-unknown */
};
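
/*
 * A minimal sketch of the queue depth accounting described above
 * (illustrative only; hpsa_example_may_queue is not part of the driver):
 * before sending an ioaccel request to a logical drive, charge the backing
 * physical drive chosen from phys_disk[] and back off if it is saturated.
 */
#if 0
static bool hpsa_example_may_queue(struct hpsa_scsi_dev_t *logical_drive,
	int map_index)
{
	struct hpsa_scsi_dev_t *phys = logical_drive->phys_disk[map_index];

	if (atomic_inc_return(&phys->ioaccel_cmds_out) > phys->queue_depth) {
		atomic_dec(&phys->ioaccel_cmds_out);	/* undo the charge */
		return false;	/* caller falls back to the normal RAID path */
	}
	return true;
}
#endif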

struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};

#pragma pack(1)
struct bmic_controller_parameters {
	u8 led_flags;
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 disable_elevator;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 force_narrow;
	u8 rebuild_priority;
	u8 expand_priority;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 bridge_revision;
	u8 snapshot_priority;
	u32 os_specific;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 reserved1;
	u8 nvram_flags;
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u16 reserved2;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
	u8 orca_password[4];
	u8 access_id[16];
	u8 reserved[356];
};
#pragma pack()
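
/*
 * #pragma pack(1) matters above: struct bmic_controller_parameters mirrors a
 * firmware buffer byte-for-byte, and without it the compiler would pad the
 * u16/u32 members to their natural alignment.  The fields sum to 512 bytes,
 * so a compile-time check like this sketch (not part of the driver) would
 * catch accidental layout changes:
 */
#if 0
static inline void bmic_controller_parameters_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct bmic_controller_parameters) != 512);
}
#endif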

struct ctlr_info {
	unsigned int *reply_map;
	int ctlr;
	char devname[8];
	char *product_name;
	struct pci_dev *pdev;
	u32 board_id;
	u64 sas_address;
	void __iomem *vaddr;
	unsigned long paddr;
	int nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int interrupts_enabled;
	int max_commands;
	int last_collision_tag; /* tags are global */
	atomic_t commands_outstanding;
#	define PERF_MODE_INT 0
#	define DOORBELL_INT 1
#	define SIMPLE_MODE_INT 2
#	define MEMQ_MODE_INT 3
	unsigned int msix_vectors;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* queue and queue Info */
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList *cmd_pool;
	dma_addr_t cmd_pool_dhandle;
	struct io_accel1_cmd *ioaccel_cmd_pool;
	dma_addr_t ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd *ioaccel2_cmd_pool;
	dma_addr_t ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo *errinfo_pool;
	dma_addr_t errinfo_pool_dhandle;
	unsigned long *cmd_pool_bits;
	int scan_finished;
	u8 scan_waiting : 1;
	spinlock_t scan_lock;
	wait_queue_head_t scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/*
	 * Cap concurrent passthrus at some reasonable maximum (see the
	 * throttling sketch after this struct).
	 */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	struct delayed_work event_monitor_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16]; /* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED (1 << 0)
#define HPSATMF_PHYS_LUN_RESET (1 << 1)
#define HPSATMF_PHYS_NEX_RESET (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK (1 << 7)
#define HPSATMF_PHYS_QRY_TSET (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC (1 << 9)
#define HPSATMF_IOACCEL_ENABLED (1 << 15)
#define HPSATMF_MASK_SUPPORTED (1 << 16)
#define HPSATMF_LOG_LUN_RESET (1 << 17)
#define HPSATMF_LOG_NEX_RESET (1 << 18)
#define HPSATMF_LOG_TASK_ABORT (1 << 19)
#define HPSATMF_LOG_TSET_ABORT (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET (1 << 22)
#define HPSATMF_LOG_QRY_TASK (1 << 23)
#define HPSATMF_LOG_QRY_TSET (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC (1 << 25)
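	/*
	 * Illustrative use of a cached capability bit (sketch only, not the
	 * driver's actual gating logic):
	 *
	 *	if (h->TMFSupportFlags & HPSATMF_LOG_LUN_RESET)
	 *		... logical LUN reset may be attempted ...
	 */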
	u32 events;
#define CTLR_STATE_CHANGE_EVENT (1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT (1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV (1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV (1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL (1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED (1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
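	/*
	 * Illustrative use (sketch only; not the driver's actual rescan
	 * logic):
	 *
	 *	if (h->events & RESCAN_REQUIRED_EVENT_BITS)
	 *		... schedule a rescan of the controller ...
	 */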
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int acciopath_status;
	int drv_req_rescan;
	int raid_offload_debug;
	int discovery_polling;
	int legacy_board;
	struct ReportLUNdata *lastlogicals;
	int needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	struct workqueue_struct *monitor_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
	struct hpsa_sas_node *sas_host;
	spinlock_t reset_lock;
};
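
/*
 * A minimal sketch of the passthru throttling noted in ctlr_info above
 * (illustrative only; hpsa_example_passthru is not part of the driver):
 * take a slot from passthru_cmds_avail before issuing a passthru command
 * and give it back when done.
 */
#if 0
static int hpsa_example_passthru(struct ctlr_info *h)
{
	int rc;

	if (atomic_dec_return(&h->passthru_cmds_avail) < 0) {
		atomic_inc(&h->passthru_cmds_avail);	/* no slot free */
		return -EBUSY;
	}
	rc = 0;	/* ... build and submit the passthru command here ... */
	atomic_inc(&h->passthru_cmds_avail);		/* return the slot */
	return rc;
}
#endif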

struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds the driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polls of the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those
 * (see the polling sketch after these macros).
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
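
/*
 * A minimal sketch of how the macros above combine into a board-ready poll
 * (illustrative only; hpsa_example_wait_ready and the board_ready()
 * predicate are assumptions, not part of the driver):
 */
#if 0
static int hpsa_example_wait_ready(struct ctlr_info *h)
{
	unsigned long i;

	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
		if (board_ready(h))	/* e.g. a scratchpad register check */
			return 0;
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	return -ENODEV;	/* board never came ready within the wait budget */
}
#endif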

/* Defining the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL 0x20
#define SA5_REQUEST_PORT_OFFSET 0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET 0x34
#define SA5_REPLY_PORT_OFFSET 0x44
#define SA5_INTR_STATUS 0x30
#define SA5_SCRATCHPAD_OFFSET 0xB0

#define SA5_CTCFG_OFFSET 0xB4
#define SA5_CTMEM_OFFSET 0xB8

#define SA5_INTR_OFF 0x08
#define SA5B_INTR_OFF 0x04
#define SA5_INTR_PENDING 0x08
#define SA5B_INTR_PENDING 0x04
#define FIFO_EMPTY 0xffffffff
#define HPSA_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT 0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING 0x04
#define SA5_PERF_INTR_OFF 0x05
#define SA5_OUTDB_STATUS_PERF_BIT 0x01
#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
#define SA5_OUTDB_CLEAR 0xA0
#define SA5_OUTDB_STATUS 0x9C

#define HPSA_INTR_ON 1
#define HPSA_INTR_OFF 0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32 0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4

#define HPSA_PHYSICAL_DEVICE_BUS 0
#define HPSA_RAID_VOLUME_BUS 1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS 2
#define HPSA_HBA_BUS 0
#define HPSA_LEGACY_HBA_BUS 3

/*
 * Send the command to the hardware.
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	/* read back to flush the posted write out to the controller */
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

/*
 * This card is the opposite of the other cards.
 * 0 turns interrupts on...
 * 0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

/*
 * Variant of the above; 0x04 turns interrupts off...
 */
static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5B_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	/* An entry is valid when its low "phase" bit matches the current
	 * wraparound pass over the ring.
	 */
	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 * returns value read from hardware.
 * returns FIFO_EMPTY if there is nothing to read
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
/*
 * Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

/*
 * Returns true if an interrupt is pending.
 */
static bool SA5B_intr_pending(struct ctlr_info *h)
{
	return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	.submit_command = SA5_submit_command,
	.set_intr_mask = SA5_intr_mask,
	.intr_pending = SA5_intr_pending,
	.command_completed = SA5_completed,
};

/* Duplicate entry of the above to mark unsupported boards */
static struct access_method SA5A_access = {
	.submit_command = SA5_submit_command,
	.set_intr_mask = SA5_intr_mask,
	.intr_pending = SA5_intr_pending,
	.command_completed = SA5_completed,
};

static struct access_method SA5B_access = {
	.submit_command = SA5_submit_command,
	.set_intr_mask = SA5B_intr_mask,
	.intr_pending = SA5B_intr_pending,
	.command_completed = SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	.submit_command = SA5_submit_command,
	.set_intr_mask = SA5_performant_intr_mask,
	.intr_pending = SA5_ioaccel_mode1_intr_pending,
	.command_completed = SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	.submit_command = SA5_submit_command_ioaccel2,
	.set_intr_mask = SA5_performant_intr_mask,
	.intr_pending = SA5_performant_intr_pending,
	.command_completed = SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	.submit_command = SA5_submit_command,
	.set_intr_mask = SA5_performant_intr_mask,
	.intr_pending = SA5_performant_intr_pending,
	.command_completed = SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	.submit_command = SA5_submit_command_no_read,
	.set_intr_mask = SA5_performant_intr_mask,
	.intr_pending = SA5_performant_intr_pending,
	.command_completed = SA5_performant_completed,
};

struct board_type {
	u32 board_id;
	char *product_name;
	struct access_method *access;
};
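
/*
 * A minimal sketch of how struct board_type ties a PCI board id to one of
 * the access method tables above (illustrative only; the table, entry and
 * helper shown here are assumptions, not the driver's actual product table):
 */
#if 0
static struct board_type example_products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	/* ... one entry per supported board ... */
};

static void example_bind_board(struct ctlr_info *h, struct board_type *bt)
{
	h->product_name = bt->product_name;
	h->access = *bt->access;	/* copy the method table by value */
}
#endif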

#endif /* HPSA_H */