1 /*
2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41 #include <linux/slab.h>
42 #include "pm8001_sas.h"
43
/**
 * pm8001_find_tag - find the tag associated with a given sas task
 * @task: the task sent to the LLDD
 * @tag: the found tag associated with the task
 */
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
50 {
51 if (task->lldd_task) {
52 struct pm8001_ccb_info *ccb;
53 ccb = task->lldd_task;
54 *tag = ccb->ccb_tag;
55 return 1;
56 }
57 return 0;
58 }
59
/**
 * pm8001_tag_free - free a tag that is no longer needed
 * @pm8001_ha: our hba struct
 * @tag: the tag to release
 */
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
66 {
67 void *bitmap = pm8001_ha->tags;
68 clear_bit(tag, bitmap);
69 }
70
/**
 * pm8001_tag_alloc - allocate a free tag for a task
 * @pm8001_ha: our hba struct
 * @tag_out: the allocated tag
 */
inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
77 {
78 unsigned int tag;
79 void *bitmap = pm8001_ha->tags;
80 unsigned long flags;
81
82 spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
83 tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
84 if (tag >= pm8001_ha->tags_num) {
85 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
86 return -SAS_QUEUE_FULL;
87 }
88 set_bit(tag, bitmap);
89 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
90 *tag_out = tag;
91 return 0;
92 }
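
/*
 * Illustrative sketch only (hypothetical caller, not part of the driver
 * flow): pm8001_tag_alloc() is normally paired with pm8001_tag_free(),
 * with the tag indexing into the ccb_info array, e.g.
 *
 *	u32 tag;
 *
 *	if (pm8001_tag_alloc(pm8001_ha, &tag))
 *		return -SAS_QUEUE_FULL;
 *	ccb = &pm8001_ha->ccb_info[tag];
 *	...
 *	pm8001_tag_free(pm8001_ha, tag);
 */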
93
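/**
 * pm8001_tag_init - mark every tag in the bitmap as free
 * @pm8001_ha: our hba struct
 */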
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
95 {
96 int i;
97 for (i = 0; i < pm8001_ha->tags_num; ++i)
98 pm8001_tag_free(pm8001_ha, i);
99 }
100
/**
 * pm8001_mem_alloc - allocate DMA-coherent memory for pm8001.
 * @pdev: pci device.
 * @virt_addr: the allocated virtual address
 * @pphys_addr: the allocated DMA address
 * @pphys_addr_hi: the high 32 bits of the aligned physical address
 * @pphys_addr_lo: the low 32 bits of the aligned physical address
 * @mem_size: memory size.
 * @align: requested alignment in bytes, or 0 for none
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
112 {
113 caddr_t mem_virt_alloc;
114 dma_addr_t mem_dma_handle;
115 u64 phys_align;
116 u64 align_offset = 0;
117 if (align)
118 align_offset = (dma_addr_t)align - 1;
119 mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
120 &mem_dma_handle, GFP_KERNEL);
121 if (!mem_virt_alloc) {
122 pr_err("pm80xx: memory allocation error\n");
123 return -1;
124 }
125 *pphys_addr = mem_dma_handle;
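	/*
	 * Round the returned DMA address up to the requested alignment and
	 * hand back the matching virtual address plus the aligned physical
	 * address split into its high and low 32-bit halves.
	 */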
126 phys_align = (*pphys_addr + align_offset) & ~align_offset;
127 *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
128 *pphys_addr_hi = upper_32_bits(phys_align);
129 *pphys_addr_lo = lower_32_bits(phys_align);
130 return 0;
131 }
132
/**
 * pm8001_find_ha_by_dev - find our hba struct from the domain device
 * handed to us by the sas layer.
 * @dev: the domain device which came from the sas layer.
 */
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
140 {
141 struct sas_ha_struct *sha = dev->port->ha;
142 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
143 return pm8001_ha;
144 }
145
/**
 * pm8001_phy_control - phy control callback registered in the
 * sas_domain_function_template for libsas. Note that this only controls
 * the HBA phys; to control an expander phy, use an SMP command instead.
 * @sas_phy: which phy in HBA phys.
 * @func: the operation.
 * @funcdata: always NULL.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
157 {
158 int rc = 0, phy_id = sas_phy->id;
159 struct pm8001_hba_info *pm8001_ha = NULL;
160 struct sas_phy_linkrates *rates;
161 struct pm8001_phy *phy;
162 DECLARE_COMPLETION_ONSTACK(completion);
163 unsigned long flags;
164 pm8001_ha = sas_phy->ha->lldd_ha;
165 phy = &pm8001_ha->phy[phy_id];
166 pm8001_ha->phy[phy_id].enable_completion = &completion;
167 switch (func) {
168 case PHY_FUNC_SET_LINK_RATE:
169 rates = funcdata;
170 if (rates->minimum_linkrate) {
171 pm8001_ha->phy[phy_id].minimum_linkrate =
172 rates->minimum_linkrate;
173 }
174 if (rates->maximum_linkrate) {
175 pm8001_ha->phy[phy_id].maximum_linkrate =
176 rates->maximum_linkrate;
177 }
178 if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
179 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
180 wait_for_completion(&completion);
181 }
182 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
183 PHY_LINK_RESET);
184 break;
185 case PHY_FUNC_HARD_RESET:
186 if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
187 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
188 wait_for_completion(&completion);
189 }
190 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
191 PHY_HARD_RESET);
192 break;
193 case PHY_FUNC_LINK_RESET:
194 if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
195 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
196 wait_for_completion(&completion);
197 }
198 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
199 PHY_LINK_RESET);
200 break;
201 case PHY_FUNC_RELEASE_SPINUP_HOLD:
202 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
203 PHY_LINK_RESET);
204 break;
205 case PHY_FUNC_DISABLE:
206 if (pm8001_ha->chip_id != chip_8001) {
207 if (pm8001_ha->phy[phy_id].phy_state ==
208 PHY_STATE_LINK_UP_SPCV) {
209 sas_phy_disconnected(&phy->sas_phy);
210 sas_notify_phy_event(&phy->sas_phy,
211 PHYE_LOSS_OF_SIGNAL);
212 phy->phy_attached = 0;
213 }
214 } else {
215 if (pm8001_ha->phy[phy_id].phy_state ==
216 PHY_STATE_LINK_UP_SPC) {
217 sas_phy_disconnected(&phy->sas_phy);
218 sas_notify_phy_event(&phy->sas_phy,
219 PHYE_LOSS_OF_SIGNAL);
220 phy->phy_attached = 0;
221 }
222 }
223 PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
224 break;
225 case PHY_FUNC_GET_EVENTS:
226 spin_lock_irqsave(&pm8001_ha->lock, flags);
227 if (pm8001_ha->chip_id == chip_8001) {
228 if (-1 == pm8001_bar4_shift(pm8001_ha,
229 (phy_id < 4) ? 0x30000 : 0x40000)) {
230 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
231 return -EINVAL;
232 }
233 }
234 {
235 struct sas_phy *phy = sas_phy->phy;
236 uint32_t *qp = (uint32_t *)(((char *)
237 pm8001_ha->io_mem[2].memvirtaddr)
238 + 0x1034 + (0x4000 * (phy_id & 3)));
239
240 phy->invalid_dword_count = qp[0];
241 phy->running_disparity_error_count = qp[1];
242 phy->loss_of_dword_sync_count = qp[3];
243 phy->phy_reset_problem_count = qp[4];
244 }
245 if (pm8001_ha->chip_id == chip_8001)
246 pm8001_bar4_shift(pm8001_ha, 0);
247 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
248 return 0;
249 default:
250 pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
251 rc = -EOPNOTSUPP;
252 }
253 msleep(300);
254 return rc;
255 }
256
/**
 * pm8001_scan_start - enable all HBA phys by sending a phy_start command
 * for each of them to the HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
263 {
264 int i;
265 struct pm8001_hba_info *pm8001_ha;
266 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
267 DECLARE_COMPLETION_ONSTACK(completion);
268 pm8001_ha = sha->lldd_ha;
269 /* SAS_RE_INITIALIZATION not available in SPCv/ve */
270 if (pm8001_ha->chip_id == chip_8001)
271 PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
272 for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
273 pm8001_ha->phy[i].enable_completion = &completion;
274 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
275 wait_for_completion(&completion);
276 msleep(300);
277 }
278 }
279
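/**
 * pm8001_scan_finished - tell the midlayer whether the phy enabling and
 * discovery triggered by pm8001_scan_start() has completed
 * @shost: the scsi host data.
 * @time: how long scanning has been running, in jiffies.
 */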
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
281 {
282 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
283
284 /* give the phy enabling interrupt event time to come in (1s
285 * is empirically about all it takes) */
286 if (time < HZ)
287 return 0;
288 /* Wait for discovery to finish */
289 sas_drain_work(ha);
290 return 1;
291 }
292
/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for an smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the smp task
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
300 {
301 return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
302 }
303
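/**
 * pm8001_get_ncq_tag - get the NCQ tag of a queued ATA command
 * @task: the sas task whose uldd_task is an ata_queued_cmd
 * @tag: the returned NCQ tag
 *
 * Returns 1 and fills @tag if the command is an NCQ (FPDMA) command,
 * otherwise returns 0.
 */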
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
305 {
306 struct ata_queued_cmd *qc = task->uldd_task;
307 if (qc) {
308 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
309 qc->tf.command == ATA_CMD_FPDMA_READ ||
310 qc->tf.command == ATA_CMD_FPDMA_RECV ||
311 qc->tf.command == ATA_CMD_FPDMA_SEND ||
312 qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
313 *tag = qc->tag;
314 return 1;
315 }
316 }
317 return 0;
318 }
319
/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for a sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the sata task
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
327 {
328 return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
329 }
330
/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the TM
 * @tmf: the task management IU
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
339 {
340 return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
341 }
342
/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for an ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the ssp task
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
350 {
351 return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
352 }
353
354 /* Find the local port id that's attached to this device */
static int sas_find_local_port_id(struct domain_device *dev)
356 {
357 struct domain_device *pdev = dev->parent;
358
359 /* Directly attached device */
360 if (!pdev)
361 return dev->port->id;
362 while (pdev) {
363 struct domain_device *pdev_p = pdev->parent;
364 if (!pdev_p)
365 return pdev->port->id;
366 pdev = pdev->parent;
367 }
368 return 0;
369 }
370
371 #define DEV_IS_GONE(pm8001_dev) \
372 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
/**
 * pm8001_task_exec - queue the task (ssp, smp or ata) to the hardware.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags.
 * @is_tmf: whether this is a task management task.
 * @tmf: the task management IU
 */
static int pm8001_task_exec(struct sas_task *task,
	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
382 {
383 struct domain_device *dev = task->dev;
384 struct pm8001_hba_info *pm8001_ha;
385 struct pm8001_device *pm8001_dev;
386 struct pm8001_port *port = NULL;
387 struct sas_task *t = task;
388 struct pm8001_ccb_info *ccb;
389 u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
390 unsigned long flags = 0;
391 enum sas_protocol task_proto = t->task_proto;
392
393 if (!dev->port) {
394 struct task_status_struct *tsm = &t->task_status;
395 tsm->resp = SAS_TASK_UNDELIVERED;
396 tsm->stat = SAS_PHY_DOWN;
397 if (dev->dev_type != SAS_SATA_DEV)
398 t->task_done(t);
399 return 0;
400 }
401 pm8001_ha = pm8001_find_ha_by_dev(task->dev);
402 if (pm8001_ha->controller_fatal_error) {
403 struct task_status_struct *ts = &t->task_status;
404
405 ts->resp = SAS_TASK_UNDELIVERED;
406 t->task_done(t);
407 return 0;
408 }
409 pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
410 spin_lock_irqsave(&pm8001_ha->lock, flags);
411 do {
412 dev = t->dev;
413 pm8001_dev = dev->lldd_dev;
414 port = &pm8001_ha->port[sas_find_local_port_id(dev)];
415 if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
416 if (sas_protocol_ata(task_proto)) {
417 struct task_status_struct *ts = &t->task_status;
418 ts->resp = SAS_TASK_UNDELIVERED;
419 ts->stat = SAS_PHY_DOWN;
420
421 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
422 t->task_done(t);
423 spin_lock_irqsave(&pm8001_ha->lock, flags);
424 continue;
425 } else {
426 struct task_status_struct *ts = &t->task_status;
427 ts->resp = SAS_TASK_UNDELIVERED;
428 ts->stat = SAS_PHY_DOWN;
429 t->task_done(t);
430 continue;
431 }
432 }
433 rc = pm8001_tag_alloc(pm8001_ha, &tag);
434 if (rc)
435 goto err_out;
436 ccb = &pm8001_ha->ccb_info[tag];
437
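		/*
		 * For SSP/SMP map the scatter/gather list here; for ATA/STP
		 * libata has already mapped it, so only the element count is
		 * recorded.
		 */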
438 if (!sas_protocol_ata(task_proto)) {
439 if (t->num_scatter) {
440 n_elem = dma_map_sg(pm8001_ha->dev,
441 t->scatter,
442 t->num_scatter,
443 t->data_dir);
444 if (!n_elem) {
445 rc = -ENOMEM;
446 goto err_out_tag;
447 }
448 }
449 } else {
450 n_elem = t->num_scatter;
451 }
452
453 t->lldd_task = ccb;
454 ccb->n_elem = n_elem;
455 ccb->ccb_tag = tag;
456 ccb->task = t;
457 ccb->device = pm8001_dev;
458 switch (task_proto) {
459 case SAS_PROTOCOL_SMP:
460 atomic_inc(&pm8001_dev->running_req);
461 rc = pm8001_task_prep_smp(pm8001_ha, ccb);
462 break;
463 case SAS_PROTOCOL_SSP:
464 atomic_inc(&pm8001_dev->running_req);
465 if (is_tmf)
466 rc = pm8001_task_prep_ssp_tm(pm8001_ha,
467 ccb, tmf);
468 else
469 rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
470 break;
471 case SAS_PROTOCOL_SATA:
472 case SAS_PROTOCOL_STP:
473 atomic_inc(&pm8001_dev->running_req);
474 rc = pm8001_task_prep_ata(pm8001_ha, ccb);
475 break;
476 default:
477 dev_printk(KERN_ERR, pm8001_ha->dev,
478 "unknown sas_task proto: 0x%x\n", task_proto);
479 rc = -EINVAL;
480 break;
481 }
482
483 if (rc) {
484 pm8001_dbg(pm8001_ha, IO, "rc is %x\n", rc);
485 atomic_dec(&pm8001_dev->running_req);
486 goto err_out_tag;
487 }
488 /* TODO: select normal or high priority */
489 spin_lock(&t->task_state_lock);
490 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
491 spin_unlock(&t->task_state_lock);
492 } while (0);
493 rc = 0;
494 goto out_done;
495
496 err_out_tag:
497 pm8001_tag_free(pm8001_ha, tag);
498 err_out:
499 dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
500 if (!sas_protocol_ata(task_proto))
501 if (n_elem)
502 dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
503 t->data_dir);
504 out_done:
505 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
506 return rc;
507 }
508
/**
 * pm8001_queue_command - the entry point registered with the upper layer;
 * all IO commands sent to the HBA come through this interface.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags
 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
516 {
517 return pm8001_task_exec(task, gfp_flags, 0, NULL);
518 }
519
/**
 * pm8001_ccb_task_free - free the sg for an ssp or smp command, then free the ccb.
 * @pm8001_ha: our hba card information
 * @task: the task to be freed.
 * @ccb: the ccb attached to the task
 * @ccb_idx: ccb index.
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
529 {
530 if (!ccb->task)
531 return;
532 if (!sas_protocol_ata(task->task_proto))
533 if (ccb->n_elem)
534 dma_unmap_sg(pm8001_ha->dev, task->scatter,
535 task->num_scatter, task->data_dir);
536
537 switch (task->task_proto) {
538 case SAS_PROTOCOL_SMP:
539 dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
540 DMA_FROM_DEVICE);
541 dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
542 DMA_TO_DEVICE);
543 break;
544
545 case SAS_PROTOCOL_SATA:
546 case SAS_PROTOCOL_STP:
547 case SAS_PROTOCOL_SSP:
548 default:
549 /* do nothing */
550 break;
551 }
552 task->lldd_task = NULL;
553 ccb->task = NULL;
554 ccb->ccb_tag = 0xFFFFFFFF;
555 ccb->open_retry = 0;
556 pm8001_tag_free(pm8001_ha, ccb_idx);
557 }
558
/**
 * pm8001_alloc_dev - find an unused pm8001_device
 * @pm8001_ha: our hba card information
 */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
564 {
565 u32 dev;
566 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
567 if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
568 pm8001_ha->devices[dev].id = dev;
569 return &pm8001_ha->devices[dev];
570 }
571 }
572 if (dev == PM8001_MAX_DEVICES) {
573 pm8001_dbg(pm8001_ha, FAIL,
574 "max support %d devices, ignore ..\n",
575 PM8001_MAX_DEVICES);
576 }
577 return NULL;
578 }
579 /**
580 * pm8001_find_dev - find a matching pm8001_device
581 * @pm8001_ha: our hba card information
582 * @device_id: device ID to match against
583 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
585 u32 device_id)
586 {
587 u32 dev;
588 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
589 if (pm8001_ha->devices[dev].device_id == device_id)
590 return &pm8001_ha->devices[dev];
591 }
592 if (dev == PM8001_MAX_DEVICES) {
593 pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
594 }
595 return NULL;
596 }
597
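/**
 * pm8001_free_dev - return a pm8001_device slot to the unused pool
 * @pm8001_dev: the device slot to clear; only its index is preserved
 */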
static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
599 {
600 u32 id = pm8001_dev->id;
601 memset(pm8001_dev, 0, sizeof(*pm8001_dev));
602 pm8001_dev->id = id;
603 pm8001_dev->dev_type = SAS_PHY_UNUSED;
604 pm8001_dev->device_id = PM8001_MAX_DEVICES;
605 pm8001_dev->sas_device = NULL;
606 }
607
/**
 * pm8001_dev_found_notify - libsas notify a device is found.
 * @dev: the device structure which sas layer used.
 *
 * When libsas finds a sas domain device, it tells the LLDD that the
 * device has been found; the LLDD then registers the device with the HBA
 * firmware by the command "OPC_INB_REG_DEV". The HBA assigns a device ID
 * (according to the device's sas address) and returns it to the LLDD.
 * From then on we communicate with the HBA FW using this device ID rather
 * than the sas address. This step is required for our HBA but optional
 * for other HBA drivers.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
621 {
622 unsigned long flags = 0;
623 int res = 0;
624 struct pm8001_hba_info *pm8001_ha = NULL;
625 struct domain_device *parent_dev = dev->parent;
626 struct pm8001_device *pm8001_device;
627 DECLARE_COMPLETION_ONSTACK(completion);
628 u32 flag = 0;
629 pm8001_ha = pm8001_find_ha_by_dev(dev);
630 spin_lock_irqsave(&pm8001_ha->lock, flags);
631
632 pm8001_device = pm8001_alloc_dev(pm8001_ha);
633 if (!pm8001_device) {
634 res = -1;
635 goto found_out;
636 }
637 pm8001_device->sas_device = dev;
638 dev->lldd_dev = pm8001_device;
639 pm8001_device->dev_type = dev->dev_type;
640 pm8001_device->dcompletion = &completion;
641 if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
642 int phy_id;
643 struct ex_phy *phy;
644 for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
645 phy_id++) {
646 phy = &parent_dev->ex_dev.ex_phy[phy_id];
647 if (SAS_ADDR(phy->attached_sas_addr)
648 == SAS_ADDR(dev->sas_addr)) {
649 pm8001_device->attached_phy = phy_id;
650 break;
651 }
652 }
653 if (phy_id == parent_dev->ex_dev.num_phys) {
654 pm8001_dbg(pm8001_ha, FAIL,
655 "Error: no attached dev:%016llx at ex:%016llx.\n",
656 SAS_ADDR(dev->sas_addr),
657 SAS_ADDR(parent_dev->sas_addr));
658 res = -1;
659 }
660 } else {
661 if (dev->dev_type == SAS_SATA_DEV) {
662 pm8001_device->attached_phy =
663 dev->rphy->identify.phy_identifier;
664 flag = 1; /* directly sata */
665 }
666 } /*register this device to HBA*/
667 pm8001_dbg(pm8001_ha, DISC, "Found device\n");
668 PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
669 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
670 wait_for_completion(&completion);
671 if (dev->dev_type == SAS_END_DEVICE)
672 msleep(50);
673 pm8001_ha->flags = PM8001F_RUN_TIME;
674 return 0;
675 found_out:
676 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
677 return res;
678 }
679
int pm8001_dev_found(struct domain_device *dev)
681 {
682 return pm8001_dev_found_notify(dev);
683 }
684
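/**
 * pm8001_task_done - completion callback for internally issued sas tasks
 * @task: the completed task; its slow_task completion is signalled
 */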
void pm8001_task_done(struct sas_task *task)
686 {
687 del_timer(&task->slow_task->timer);
688 complete(&task->slow_task->completion);
689 }
690
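/**
 * pm8001_tmf_timedout - timer callback for internal TMF/abort tasks
 * @t: the timer embedded in the task's sas_task_slow
 *
 * Marks the task as aborted and completes it so the waiter can return.
 */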
static void pm8001_tmf_timedout(struct timer_list *t)
692 {
693 struct sas_task_slow *slow = from_timer(slow, t, timer);
694 struct sas_task *task = slow->task;
695 unsigned long flags;
696
697 spin_lock_irqsave(&task->task_state_lock, flags);
698 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
699 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
700 complete(&task->slow_task->completion);
701 }
702 spin_unlock_irqrestore(&task->task_state_lock, flags);
703 }
704
705 #define PM8001_TASK_TIMEOUT 20
/**
 * pm8001_exec_internal_tmf_task - execute some task management commands.
 * @dev: the wanted device.
 * @parameter: ssp task parameter.
 * @para_len: the length of the parameter.
 * @tmf: which task management function is to be taken.
 *
 * When an error or exception happens, we may want to do something, for
 * example abort the issued task which resulted in this exception. That is
 * done by calling this function; note that it shares the task execution
 * interface.
 */
static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
719 {
720 int res, retry;
721 struct sas_task *task = NULL;
722 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
723 struct pm8001_device *pm8001_dev = dev->lldd_dev;
724 DECLARE_COMPLETION_ONSTACK(completion_setstate);
725
726 for (retry = 0; retry < 3; retry++) {
727 task = sas_alloc_slow_task(GFP_KERNEL);
728 if (!task)
729 return -ENOMEM;
730
731 task->dev = dev;
732 task->task_proto = dev->tproto;
733 memcpy(&task->ssp_task, parameter, para_len);
734 task->task_done = pm8001_task_done;
735 task->slow_task->timer.function = pm8001_tmf_timedout;
736 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
737 add_timer(&task->slow_task->timer);
738
739 res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);
740
741 if (res) {
742 del_timer(&task->slow_task->timer);
743 pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
744 goto ex_err;
745 }
746 wait_for_completion(&task->slow_task->completion);
747 if (pm8001_ha->chip_id != chip_8001) {
748 pm8001_dev->setds_completion = &completion_setstate;
749 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
750 pm8001_dev, 0x01);
751 wait_for_completion(&completion_setstate);
752 }
753 res = -TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
755 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
756 pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
757 tmf->tmf);
758 goto ex_err;
759 }
760
761 if (task->task_status.resp == SAS_TASK_COMPLETE &&
762 task->task_status.stat == SAM_STAT_GOOD) {
763 res = TMF_RESP_FUNC_COMPLETE;
764 break;
765 }
766
767 if (task->task_status.resp == SAS_TASK_COMPLETE &&
768 task->task_status.stat == SAS_DATA_UNDERRUN) {
769 /* no error, but return the number of bytes of
770 * underrun */
771 res = task->task_status.residual;
772 break;
773 }
774
775 if (task->task_status.resp == SAS_TASK_COMPLETE &&
776 task->task_status.stat == SAS_DATA_OVERRUN) {
777 pm8001_dbg(pm8001_ha, FAIL, "Blocked task error.\n");
778 res = -EMSGSIZE;
779 break;
780 } else {
781 pm8001_dbg(pm8001_ha, EH,
782 " Task to dev %016llx response:0x%x status 0x%x\n",
783 SAS_ADDR(dev->sas_addr),
784 task->task_status.resp,
785 task->task_status.stat);
786 sas_free_task(task);
787 task = NULL;
788 }
789 }
790 ex_err:
791 BUG_ON(retry == 3 && task != NULL);
792 sas_free_task(task);
793 return res;
794 }
795
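/**
 * pm8001_exec_internal_task_abort - issue a firmware task abort and wait
 * @pm8001_ha: our hba card information
 * @pm8001_dev: the device whose task(s) should be aborted
 * @dev: the domain device which the sas layer used
 * @flag: 0 to abort a single tagged task, 1 to abort all tasks on the device
 * @task_tag: the tag of the task to abort when @flag is 0
 */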
796 static int
pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
798 struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
799 u32 task_tag)
800 {
801 int res, retry;
802 u32 ccb_tag;
803 struct pm8001_ccb_info *ccb;
804 struct sas_task *task = NULL;
805
806 for (retry = 0; retry < 3; retry++) {
807 task = sas_alloc_slow_task(GFP_KERNEL);
808 if (!task)
809 return -ENOMEM;
810
811 task->dev = dev;
812 task->task_proto = dev->tproto;
813 task->task_done = pm8001_task_done;
814 task->slow_task->timer.function = pm8001_tmf_timedout;
815 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
816 add_timer(&task->slow_task->timer);
817
818 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
819 if (res)
820 goto ex_err;
821 ccb = &pm8001_ha->ccb_info[ccb_tag];
822 ccb->device = pm8001_dev;
823 ccb->ccb_tag = ccb_tag;
824 ccb->task = task;
825 ccb->n_elem = 0;
826
827 res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
828 pm8001_dev, flag, task_tag, ccb_tag);
829
830 if (res) {
831 del_timer(&task->slow_task->timer);
832 pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
833 goto ex_err;
834 }
835 wait_for_completion(&task->slow_task->completion);
836 res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
838 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
839 pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
840 goto ex_err;
841 }
842
843 if (task->task_status.resp == SAS_TASK_COMPLETE &&
844 task->task_status.stat == SAM_STAT_GOOD) {
845 res = TMF_RESP_FUNC_COMPLETE;
846 break;
847
848 } else {
849 pm8001_dbg(pm8001_ha, EH,
850 " Task to dev %016llx response: 0x%x status 0x%x\n",
851 SAS_ADDR(dev->sas_addr),
852 task->task_status.resp,
853 task->task_status.stat);
854 sas_free_task(task);
855 task = NULL;
856 }
857 }
858 ex_err:
859 BUG_ON(retry == 3 && task != NULL);
860 sas_free_task(task);
861 return res;
862 }
863
864 /**
865 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
866 * @dev: the device structure which sas layer used.
867 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
869 {
870 unsigned long flags = 0;
871 struct pm8001_hba_info *pm8001_ha;
872 struct pm8001_device *pm8001_dev = dev->lldd_dev;
873
874 pm8001_ha = pm8001_find_ha_by_dev(dev);
875 spin_lock_irqsave(&pm8001_ha->lock, flags);
876 if (pm8001_dev) {
877 u32 device_id = pm8001_dev->device_id;
878
879 pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
880 pm8001_dev->device_id, pm8001_dev->dev_type);
881 if (atomic_read(&pm8001_dev->running_req)) {
882 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
883 pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
884 dev, 1, 0);
885 while (atomic_read(&pm8001_dev->running_req))
886 msleep(20);
887 spin_lock_irqsave(&pm8001_ha->lock, flags);
888 }
889 PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
890 pm8001_free_dev(pm8001_dev);
891 } else {
892 pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
893 }
894 dev->lldd_dev = NULL;
895 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
896 }
897
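/**
 * pm8001_dev_gone - libsas notification that a domain device is gone
 * @dev: the device structure which the sas layer used
 */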
void pm8001_dev_gone(struct domain_device *dev)
899 {
900 pm8001_dev_gone_notify(dev);
901 }
902
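/**
 * pm8001_issue_ssp_tmf - build and execute an SSP task management function
 * @dev: the SSP target device
 * @lun: the 8-byte LUN the TMF is addressed to
 * @tmf: the task management IU to send
 */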
static int pm8001_issue_ssp_tmf(struct domain_device *dev,
904 u8 *lun, struct pm8001_tmf_task *tmf)
905 {
906 struct sas_ssp_task ssp_task;
907 if (!(dev->tproto & SAS_PROTOCOL_SSP))
908 return TMF_RESP_FUNC_ESUPP;
909
910 strncpy((u8 *)&ssp_task.LUN, lun, 8);
911 return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
912 tmf);
913 }
914
915 /* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
917 struct pm8001_hba_info *pm8001_ha,
918 struct sas_task *task_to_close,
919 struct pm8001_device *device_to_close)
920 {
921 int i;
922 unsigned long flags;
923
924 if (pm8001_ha == NULL)
925 return;
926
927 spin_lock_irqsave(&pm8001_ha->lock, flags);
928
929 for (i = 0; i < PM8001_MAX_CCB; i++) {
930 struct sas_task *task;
931 struct task_status_struct *ts;
932 struct pm8001_device *pm8001_dev;
933 unsigned long flags1;
934 u32 tag;
935 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
936
937 pm8001_dev = ccb->device;
938 if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
939 continue;
940 if (!device_to_close) {
941 uintptr_t d = (uintptr_t)pm8001_dev
942 - (uintptr_t)&pm8001_ha->devices;
943 if (((d % sizeof(*pm8001_dev)) != 0)
944 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
945 continue;
946 } else if (pm8001_dev != device_to_close)
947 continue;
948 tag = ccb->ccb_tag;
949 if (!tag || (tag == 0xFFFFFFFF))
950 continue;
951 task = ccb->task;
952 if (!task || !task->task_done)
953 continue;
954 if (task_to_close && (task != task_to_close))
955 continue;
956 ts = &task->task_status;
957 ts->resp = SAS_TASK_COMPLETE;
958 /* Force the midlayer to retry */
959 ts->stat = SAS_OPEN_REJECT;
960 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
961 if (pm8001_dev)
962 atomic_dec(&pm8001_dev->running_req);
963 spin_lock_irqsave(&task->task_state_lock, flags1);
964 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
965 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
966 task->task_state_flags |= SAS_TASK_STATE_DONE;
967 if (unlikely((task->task_state_flags
968 & SAS_TASK_STATE_ABORTED))) {
969 spin_unlock_irqrestore(&task->task_state_lock,
970 flags1);
971 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
972 } else {
973 spin_unlock_irqrestore(&task->task_state_lock,
974 flags1);
975 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
976 mb();/* in order to force CPU ordering */
977 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
978 task->task_done(task);
979 spin_lock_irqsave(&pm8001_ha->lock, flags);
980 }
981 }
982
983 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
984 }
985
/**
 * pm8001_I_T_nexus_reset - the standard mandates a link reset for ATA
 * (type 0) and a hard reset for SSP (type 1), only for RECOVERY.
 * @dev: the device structure for the device to reset.
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
992 {
993 int rc = TMF_RESP_FUNC_FAILED;
994 struct pm8001_device *pm8001_dev;
995 struct pm8001_hba_info *pm8001_ha;
996 struct sas_phy *phy;
997
998 if (!dev || !dev->lldd_dev)
999 return -ENODEV;
1000
1001 pm8001_dev = dev->lldd_dev;
1002 pm8001_ha = pm8001_find_ha_by_dev(dev);
1003 phy = sas_get_local_phy(dev);
1004
1005 if (dev_is_sata(dev)) {
1006 if (scsi_is_sas_phy_local(phy)) {
1007 rc = 0;
1008 goto out;
1009 }
1010 rc = sas_phy_reset(phy, 1);
1011 if (rc) {
1012 pm8001_dbg(pm8001_ha, EH,
1013 "phy reset failed for device %x\n"
1014 "with rc %d\n", pm8001_dev->device_id, rc);
1015 rc = TMF_RESP_FUNC_FAILED;
1016 goto out;
1017 }
1018 msleep(2000);
1019 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1020 dev, 1, 0);
1021 if (rc) {
1022 pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
1023 "with rc %d\n", pm8001_dev->device_id, rc);
1024 rc = TMF_RESP_FUNC_FAILED;
1025 }
1026 } else {
1027 rc = sas_phy_reset(phy, 1);
1028 msleep(2000);
1029 }
1030 pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
1031 pm8001_dev->device_id, rc);
1032 out:
1033 sas_put_local_phy(phy);
1034 return rc;
1035 }
1036
/*
 * This function handles the IT_NEXUS_XXX event or completion
 * status code for SSP/SATA/SMP I/O requests.
 */
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
1042 {
1043 int rc = TMF_RESP_FUNC_FAILED;
1044 struct pm8001_device *pm8001_dev;
1045 struct pm8001_hba_info *pm8001_ha;
1046 struct sas_phy *phy;
1047
1048 if (!dev || !dev->lldd_dev)
1049 return -1;
1050
1051 pm8001_dev = dev->lldd_dev;
1052 pm8001_ha = pm8001_find_ha_by_dev(dev);
1053
1054 pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");
1055
1056 phy = sas_get_local_phy(dev);
1057
1058 if (dev_is_sata(dev)) {
1059 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1060 if (scsi_is_sas_phy_local(phy)) {
1061 rc = 0;
1062 goto out;
1063 }
1064 /* send internal ssp/sata/smp abort command to FW */
1065 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1066 dev, 1, 0);
1067 msleep(100);
1068
1069 /* deregister the target device */
1070 pm8001_dev_gone_notify(dev);
1071 msleep(200);
1072
1073 /*send phy reset to hard reset target */
1074 rc = sas_phy_reset(phy, 1);
1075 msleep(2000);
1076 pm8001_dev->setds_completion = &completion_setstate;
1077
1078 wait_for_completion(&completion_setstate);
1079 } else {
1080 /* send internal ssp/sata/smp abort command to FW */
1081 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1082 dev, 1, 0);
1083 msleep(100);
1084
1085 /* deregister the target device */
1086 pm8001_dev_gone_notify(dev);
1087 msleep(200);
1088
1089 /*send phy reset to hard reset target */
1090 rc = sas_phy_reset(phy, 1);
1091 msleep(2000);
1092 }
1093 pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
1094 pm8001_dev->device_id, rc);
1095 out:
1096 sas_put_local_phy(phy);
1097
1098 return rc;
1099 }
/* mandatory SAM-3, this task resets the specified LUN */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
1102 {
1103 int rc = TMF_RESP_FUNC_FAILED;
1104 struct pm8001_tmf_task tmf_task;
1105 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1106 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1107 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1108 if (dev_is_sata(dev)) {
1109 struct sas_phy *phy = sas_get_local_phy(dev);
1110 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1111 dev, 1, 0);
1112 rc = sas_phy_reset(phy, 1);
1113 sas_put_local_phy(phy);
1114 pm8001_dev->setds_completion = &completion_setstate;
1115 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1116 pm8001_dev, 0x01);
1117 wait_for_completion(&completion_setstate);
1118 } else {
1119 tmf_task.tmf = TMF_LU_RESET;
1120 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1121 }
	/* If this fails, fall back to an I_T nexus reset */
1123 pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
1124 pm8001_dev->device_id, rc);
1125 return rc;
1126 }
1127
1128 /* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
1130 {
1131 u32 tag = 0xdeadbeef;
1132 struct scsi_lun lun;
1133 struct pm8001_tmf_task tmf_task;
1134 int rc = TMF_RESP_FUNC_FAILED;
1135 if (unlikely(!task || !task->lldd_task || !task->dev))
1136 return rc;
1137
1138 if (task->task_proto & SAS_PROTOCOL_SSP) {
1139 struct scsi_cmnd *cmnd = task->uldd_task;
1140 struct domain_device *dev = task->dev;
1141 struct pm8001_hba_info *pm8001_ha =
1142 pm8001_find_ha_by_dev(dev);
1143
1144 int_to_scsilun(cmnd->device->lun, &lun);
1145 rc = pm8001_find_tag(task, &tag);
1146 if (rc == 0) {
1147 rc = TMF_RESP_FUNC_FAILED;
1148 return rc;
1149 }
1150 pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
1151 tmf_task.tmf = TMF_QUERY_TASK;
1152 tmf_task.tag_of_task_to_be_managed = tag;
1153
1154 rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1155 switch (rc) {
1156 /* The task is still in Lun, release it then */
1157 case TMF_RESP_FUNC_SUCC:
1158 pm8001_dbg(pm8001_ha, EH,
1159 "The task is still in Lun\n");
1160 break;
1161 /* The task is not in Lun or failed, reset the phy */
1162 case TMF_RESP_FUNC_FAILED:
1163 case TMF_RESP_FUNC_COMPLETE:
1164 pm8001_dbg(pm8001_ha, EH,
1165 "The task is not in Lun or failed, reset the phy\n");
1166 break;
1167 }
1168 }
1169 pr_err("pm80xx: rc= %d\n", rc);
1170 return rc;
1171 }
1172
/* mandatory SAM-3, abort the specified task and still free its task/ccb info */
int pm8001_abort_task(struct sas_task *task)
1175 {
1176 unsigned long flags;
1177 u32 tag;
1178 struct domain_device *dev ;
1179 struct pm8001_hba_info *pm8001_ha;
1180 struct scsi_lun lun;
1181 struct pm8001_device *pm8001_dev;
1182 struct pm8001_tmf_task tmf_task;
1183 int rc = TMF_RESP_FUNC_FAILED, ret;
1184 u32 phy_id;
1185 struct sas_task_slow slow_task;
1186 if (unlikely(!task || !task->lldd_task || !task->dev))
1187 return TMF_RESP_FUNC_FAILED;
1188 dev = task->dev;
1189 pm8001_dev = dev->lldd_dev;
1190 pm8001_ha = pm8001_find_ha_by_dev(dev);
1191 phy_id = pm8001_dev->attached_phy;
1192 ret = pm8001_find_tag(task, &tag);
1193 if (ret == 0) {
1194 pm8001_printk("no tag for task:%p\n", task);
1195 return TMF_RESP_FUNC_FAILED;
1196 }
1197 spin_lock_irqsave(&task->task_state_lock, flags);
1198 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1199 spin_unlock_irqrestore(&task->task_state_lock, flags);
1200 return TMF_RESP_FUNC_COMPLETE;
1201 }
1202 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
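	/*
	 * If the task was not issued as a slow task it has no completion to
	 * wait on, so temporarily install one on the stack for the abort
	 * path below.
	 */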
1203 if (task->slow_task == NULL) {
1204 init_completion(&slow_task.completion);
1205 task->slow_task = &slow_task;
1206 }
1207 spin_unlock_irqrestore(&task->task_state_lock, flags);
1208 if (task->task_proto & SAS_PROTOCOL_SSP) {
1209 struct scsi_cmnd *cmnd = task->uldd_task;
1210 int_to_scsilun(cmnd->device->lun, &lun);
1211 tmf_task.tmf = TMF_ABORT_TASK;
1212 tmf_task.tag_of_task_to_be_managed = tag;
1213 rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1214 pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1215 pm8001_dev->sas_device, 0, tag);
1216 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1217 task->task_proto & SAS_PROTOCOL_STP) {
1218 if (pm8001_ha->chip_id == chip_8006) {
1219 DECLARE_COMPLETION_ONSTACK(completion_reset);
1220 DECLARE_COMPLETION_ONSTACK(completion);
1221 struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
1222
1223 /* 1. Set Device state as Recovery */
1224 pm8001_dev->setds_completion = &completion;
1225 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1226 pm8001_dev, 0x03);
1227 wait_for_completion(&completion);
1228
1229 /* 2. Send Phy Control Hard Reset */
1230 reinit_completion(&completion);
1231 phy->port_reset_status = PORT_RESET_TMO;
1232 phy->reset_success = false;
1233 phy->enable_completion = &completion;
1234 phy->reset_completion = &completion_reset;
1235 ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
1236 PHY_HARD_RESET);
1237 if (ret) {
1238 phy->enable_completion = NULL;
1239 phy->reset_completion = NULL;
1240 goto out;
1241 }
1242
1243 /* In the case of the reset timeout/fail we still
1244 * abort the command at the firmware. The assumption
1245 * here is that the drive is off doing something so
1246 * that it's not processing requests, and we want to
1247 * avoid getting a completion for this and either
1248 * leaking the task in libsas or losing the race and
1249 * getting a double free.
1250 */
1251 pm8001_dbg(pm8001_ha, MSG,
1252 "Waiting for local phy ctl\n");
1253 ret = wait_for_completion_timeout(&completion,
1254 PM8001_TASK_TIMEOUT * HZ);
1255 if (!ret || !phy->reset_success) {
1256 phy->enable_completion = NULL;
1257 phy->reset_completion = NULL;
1258 } else {
1259 /* 3. Wait for Port Reset complete or
1260 * Port reset TMO
1261 */
1262 pm8001_dbg(pm8001_ha, MSG,
1263 "Waiting for Port reset\n");
1264 ret = wait_for_completion_timeout(
1265 &completion_reset,
1266 PM8001_TASK_TIMEOUT * HZ);
1267 if (!ret)
1268 phy->reset_completion = NULL;
1269 WARN_ON(phy->port_reset_status ==
1270 PORT_RESET_TMO);
1271 if (phy->port_reset_status == PORT_RESET_TMO) {
1272 pm8001_dev_gone_notify(dev);
1273 goto out;
1274 }
1275 }
1276
1277 /*
1278 * 4. SATA Abort ALL
1279 * we wait for the task to be aborted so that the task
1280 * is removed from the ccb. on success the caller is
1281 * going to free the task.
1282 */
1283 ret = pm8001_exec_internal_task_abort(pm8001_ha,
1284 pm8001_dev, pm8001_dev->sas_device, 1, tag);
1285 if (ret)
1286 goto out;
1287 ret = wait_for_completion_timeout(
1288 &task->slow_task->completion,
1289 PM8001_TASK_TIMEOUT * HZ);
1290 if (!ret)
1291 goto out;
1292
1293 /* 5. Set Device State as Operational */
1294 reinit_completion(&completion);
1295 pm8001_dev->setds_completion = &completion;
1296 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1297 pm8001_dev, 0x01);
1298 wait_for_completion(&completion);
1299 } else {
1300 rc = pm8001_exec_internal_task_abort(pm8001_ha,
1301 pm8001_dev, pm8001_dev->sas_device, 0, tag);
1302 }
1303 rc = TMF_RESP_FUNC_COMPLETE;
1304 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
1305 /* SMP */
1306 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1307 pm8001_dev->sas_device, 0, tag);
1308
1309 }
1310 out:
1311 spin_lock_irqsave(&task->task_state_lock, flags);
1312 if (task->slow_task == &slow_task)
1313 task->slow_task = NULL;
1314 spin_unlock_irqrestore(&task->task_state_lock, flags);
1315 if (rc != TMF_RESP_FUNC_COMPLETE)
1316 pm8001_printk("rc= %d\n", rc);
1317 return rc;
1318 }
1319
int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
1321 {
1322 struct pm8001_tmf_task tmf_task;
1323
1324 tmf_task.tmf = TMF_ABORT_TASK_SET;
1325 return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1326 }
1327
int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
1329 {
1330 struct pm8001_tmf_task tmf_task;
1331
1332 tmf_task.tmf = TMF_CLEAR_ACA;
1333 return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1334 }
1335
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
1337 {
1338 struct pm8001_tmf_task tmf_task;
1339 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1340 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1341
1342 pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
1343 pm8001_dev->device_id);
1344 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1345 return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1346 }
1347
1348