1 /*
2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41 #include <linux/slab.h>
42 #include "pm8001_sas.h"
43 #include "pm80xx_tracepoints.h"
44
45 /**
46 * pm8001_find_tag - find the tag associated with a given sas task
47 * @task: the task sent to the LLDD
48 * @tag: the found tag associated with the task
49 */
50 static int pm8001_find_tag(struct sas_task *task, u32 *tag)
51 {
52 if (task->lldd_task) {
53 struct pm8001_ccb_info *ccb;
54 ccb = task->lldd_task;
55 *tag = ccb->ccb_tag;
56 return 1;
57 }
58 return 0;
59 }
60
61 /**
62 * pm8001_tag_free - free a tag that is no longer needed
63 * @pm8001_ha: our hba struct
64 * @tag: the tag to free
65 */
66 void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
67 {
68 void *bitmap = pm8001_ha->rsvd_tags;
69 unsigned long flags;
70
71 if (tag >= PM8001_RESERVE_SLOT)
72 return;
73
74 spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
75 __clear_bit(tag, bitmap);
76 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
77 }
78
79 /**
80 * pm8001_tag_alloc - allocate a free tag for task use.
81 * @pm8001_ha: our hba struct
82 * @tag_out: the allocated tag.
83 */
84 int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
85 {
86 void *bitmap = pm8001_ha->rsvd_tags;
87 unsigned long flags;
88 unsigned int tag;
89
90 spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
91 tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT);
92 if (tag >= PM8001_RESERVE_SLOT) {
93 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
94 return -SAS_QUEUE_FULL;
95 }
96 __set_bit(tag, bitmap);
97 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
98
99 /* reserved tags are in the lower region of the tagset */
100 *tag_out = tag;
101 return 0;
102 }
103
104 /**
105 * pm8001_mem_alloc - allocate memory for pm8001.
106 * @pdev: pci device.
107 * @virt_addr: the allocated virtual address
108 * @pphys_addr: DMA address for this device
109 * @pphys_addr_hi: the upper 32 bits of the physical address
110 * @pphys_addr_lo: the lower 32 bits of the physical address
111 * @mem_size: memory size.
112 * @align: requested byte alignment
113 */
114 int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
115 dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
116 u32 *pphys_addr_lo, u32 mem_size, u32 align)
117 {
118 caddr_t mem_virt_alloc;
119 dma_addr_t mem_dma_handle;
120 u64 phys_align;
121 u64 align_offset = 0;
122 if (align)
123 align_offset = (dma_addr_t)align - 1;
124 mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
125 &mem_dma_handle, GFP_KERNEL);
126 if (!mem_virt_alloc)
127 return -ENOMEM;
128 *pphys_addr = mem_dma_handle;
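/*
 * Round the bus address up to the requested alignment and advance the
 * virtual address by the same amount so both views stay consistent,
 * e.g. align = 0x100, handle = 0x12345678 -> phys_align = 0x12345700.
 */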
129 phys_align = (*pphys_addr + align_offset) & ~align_offset;
130 *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
131 *pphys_addr_hi = upper_32_bits(phys_align);
132 *pphys_addr_lo = lower_32_bits(phys_align);
133 return 0;
134 }
135
136 /**
137 * pm8001_find_ha_by_dev - find our hba struct from the domain device
138 * provided by the sas layer.
139 * @dev: the domain device which comes from the sas layer.
140 */
141 static
142 struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
143 {
144 struct sas_ha_struct *sha = dev->port->ha;
145 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
146 return pm8001_ha;
147 }
148
149 /**
150 * pm8001_phy_control - phy control handler registered in the
151 * sas_domain_function_template for libsas. Note: this only controls
152 * HBA phys, not expander phys; to control an expander phy, use an
153 * SMP command instead.
154 * @sas_phy: which phy in HBA phys.
155 * @func: the operation.
156 * @funcdata: always NULL.
157 */
158 int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
159 void *funcdata)
160 {
161 int rc = 0, phy_id = sas_phy->id;
162 struct pm8001_hba_info *pm8001_ha = NULL;
163 struct sas_phy_linkrates *rates;
164 struct pm8001_phy *phy;
165 DECLARE_COMPLETION_ONSTACK(completion);
166 unsigned long flags;
167 pm8001_ha = sas_phy->ha->lldd_ha;
168 phy = &pm8001_ha->phy[phy_id];
169 pm8001_ha->phy[phy_id].enable_completion = &completion;
170
171 if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
172 /*
173 * If the controller is in fatal error state,
174 * we will not get a response from the controller
175 */
176 pm8001_dbg(pm8001_ha, FAIL,
177 "Phy control failed due to fatal errors\n");
178 return -EFAULT;
179 }
180
181 switch (func) {
182 case PHY_FUNC_SET_LINK_RATE:
183 rates = funcdata;
184 if (rates->minimum_linkrate) {
185 pm8001_ha->phy[phy_id].minimum_linkrate =
186 rates->minimum_linkrate;
187 }
188 if (rates->maximum_linkrate) {
189 pm8001_ha->phy[phy_id].maximum_linkrate =
190 rates->maximum_linkrate;
191 }
192 if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
193 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
194 wait_for_completion(&completion);
195 }
196 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
197 PHY_LINK_RESET);
198 break;
199 case PHY_FUNC_HARD_RESET:
200 if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
201 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
202 wait_for_completion(&completion);
203 }
204 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
205 PHY_HARD_RESET);
206 break;
207 case PHY_FUNC_LINK_RESET:
208 if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
209 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
210 wait_for_completion(&completion);
211 }
212 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
213 PHY_LINK_RESET);
214 break;
215 case PHY_FUNC_RELEASE_SPINUP_HOLD:
216 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
217 PHY_LINK_RESET);
218 break;
219 case PHY_FUNC_DISABLE:
220 if (pm8001_ha->chip_id != chip_8001) {
221 if (pm8001_ha->phy[phy_id].phy_state ==
222 PHY_STATE_LINK_UP_SPCV) {
223 sas_phy_disconnected(&phy->sas_phy);
224 sas_notify_phy_event(&phy->sas_phy,
225 PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
226 phy->phy_attached = 0;
227 }
228 } else {
229 if (pm8001_ha->phy[phy_id].phy_state ==
230 PHY_STATE_LINK_UP_SPC) {
231 sas_phy_disconnected(&phy->sas_phy);
232 sas_notify_phy_event(&phy->sas_phy,
233 PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
234 phy->phy_attached = 0;
235 }
236 }
237 PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
238 break;
239 case PHY_FUNC_GET_EVENTS:
240 spin_lock_irqsave(&pm8001_ha->lock, flags);
241 if (pm8001_ha->chip_id == chip_8001) {
242 if (-1 == pm8001_bar4_shift(pm8001_ha,
243 (phy_id < 4) ? 0x30000 : 0x40000)) {
244 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
245 return -EINVAL;
246 }
247 }
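/*
 * Read the per-phy link error counters; each phy owns a 0x4000-byte
 * register block starting at offset 0x1034 within this BAR window.
 */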
248 {
249 struct sas_phy *phy = sas_phy->phy;
250 u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
251 + 0x1034 + (0x4000 * (phy_id & 3));
252
253 phy->invalid_dword_count = readl(qp);
254 phy->running_disparity_error_count = readl(&qp[1]);
255 phy->loss_of_dword_sync_count = readl(&qp[3]);
256 phy->phy_reset_problem_count = readl(&qp[4]);
257 }
258 if (pm8001_ha->chip_id == chip_8001)
259 pm8001_bar4_shift(pm8001_ha, 0);
260 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
261 return 0;
262 default:
263 pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
264 rc = -EOPNOTSUPP;
265 }
266 msleep(300);
267 return rc;
268 }
269
270 /**
271 * pm8001_scan_start - enable all HBA phys by sending a phy_start
272 * command to the HBA.
273 * @shost: the scsi host data.
274 */
275 void pm8001_scan_start(struct Scsi_Host *shost)
276 {
277 int i;
278 struct pm8001_hba_info *pm8001_ha;
279 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
280 DECLARE_COMPLETION_ONSTACK(completion);
281 pm8001_ha = sha->lldd_ha;
282 /* SAS_RE_INITIALIZATION not available in SPCv/ve */
283 if (pm8001_ha->chip_id == chip_8001)
284 PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
285 for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
286 pm8001_ha->phy[i].enable_completion = &completion;
287 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
288 wait_for_completion(&completion);
289 msleep(300);
290 }
291 }
292
293 int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
294 {
295 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
296
297 /* give the phy enabling interrupt event time to come in (1s
298 * is empirically about all it takes) */
299 if (time < HZ)
300 return 0;
301 /* Wait for discovery to finish */
302 sas_drain_work(ha);
303 return 1;
304 }
305
306 /**
307 * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
308 * @pm8001_ha: our hba card information
309 * @ccb: the ccb attached to the smp task
310 */
311 static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
312 struct pm8001_ccb_info *ccb)
313 {
314 return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
315 }
316
317 u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
318 {
319 struct ata_queued_cmd *qc = task->uldd_task;
320
321 if (qc && ata_is_ncq(qc->tf.protocol)) {
322 *tag = qc->tag;
323 return 1;
324 }
325
326 return 0;
327 }
328
329 /**
330 * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
331 * @pm8001_ha: our hba card information
332 * @ccb: the ccb attached to the sata task
333 */
334 static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
335 struct pm8001_ccb_info *ccb)
336 {
337 return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
338 }
339
340 /**
341 * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
342 * for internal abort task
343 * @pm8001_ha: our hba card information
344 * @ccb: the ccb attached to the internal abort task
345 */
346 static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
347 struct pm8001_ccb_info *ccb)
348 {
349 return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
350 }
351
352 /**
353 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
354 * @pm8001_ha: our hba card information
355 * @ccb: the ccb attached to the TM request
356 * @tmf: the task management IU
357 */
358 static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
359 struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
360 {
361 return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
362 }
363
364 /**
365 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
366 * @pm8001_ha: our hba card information
367 * @ccb: the ccb attached to the ssp task
368 */
369 static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
370 struct pm8001_ccb_info *ccb)
371 {
372 return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
373 }
374
375 /* Find the local port id that's attached to this device */
376 static int sas_find_local_port_id(struct domain_device *dev)
377 {
378 struct domain_device *pdev = dev->parent;
379
380 /* Directly attached device */
381 if (!pdev)
382 return dev->port->id;
383 while (pdev) {
384 struct domain_device *pdev_p = pdev->parent;
385 if (!pdev_p)
386 return pdev->port->id;
387 pdev = pdev->parent;
388 }
389 return 0;
390 }
391
392 #define DEV_IS_GONE(pm8001_dev) \
393 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
394
395
396 static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
397 struct pm8001_ccb_info *ccb)
398 {
399 struct sas_task *task = ccb->task;
400 enum sas_protocol task_proto = task->task_proto;
401 struct sas_tmf_task *tmf = task->tmf;
402 int is_tmf = !!tmf;
403
404 switch (task_proto) {
405 case SAS_PROTOCOL_SMP:
406 return pm8001_task_prep_smp(pm8001_ha, ccb);
407 case SAS_PROTOCOL_SSP:
408 if (is_tmf)
409 return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
410 return pm8001_task_prep_ssp(pm8001_ha, ccb);
411 case SAS_PROTOCOL_SATA:
412 case SAS_PROTOCOL_STP:
413 return pm8001_task_prep_ata(pm8001_ha, ccb);
414 case SAS_PROTOCOL_INTERNAL_ABORT:
415 return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
416 default:
417 dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
418 task_proto);
419 }
420
421 return -EINVAL;
422 }
423
424 /**
425 * pm8001_queue_command - queue command entry point for the upper layer;
426 * all IO commands sent to the HBA come through this interface.
427 * @task: the task to be executed.
428 * @gfp_flags: gfp_flags
429 */
430 int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
431 {
432 struct task_status_struct *ts = &task->task_status;
433 enum sas_protocol task_proto = task->task_proto;
434 struct domain_device *dev = task->dev;
435 struct pm8001_device *pm8001_dev = dev->lldd_dev;
436 bool internal_abort = sas_is_internal_abort(task);
437 struct pm8001_hba_info *pm8001_ha;
438 struct pm8001_port *port = NULL;
439 struct pm8001_ccb_info *ccb;
440 unsigned long flags;
441 u32 n_elem = 0;
442 int rc = 0;
443
444 if (!internal_abort && !dev->port) {
445 ts->resp = SAS_TASK_UNDELIVERED;
446 ts->stat = SAS_PHY_DOWN;
447 if (dev->dev_type != SAS_SATA_DEV)
448 task->task_done(task);
449 return 0;
450 }
451
452 pm8001_ha = pm8001_find_ha_by_dev(dev);
453 if (pm8001_ha->controller_fatal_error) {
454 ts->resp = SAS_TASK_UNDELIVERED;
455 task->task_done(task);
456 return 0;
457 }
458
459 pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
460
461 spin_lock_irqsave(&pm8001_ha->lock, flags);
462
463 pm8001_dev = dev->lldd_dev;
464 port = &pm8001_ha->port[sas_find_local_port_id(dev)];
465
466 if (!internal_abort &&
467 (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
468 ts->resp = SAS_TASK_UNDELIVERED;
469 ts->stat = SAS_PHY_DOWN;
470 if (sas_protocol_ata(task_proto)) {
471 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
472 task->task_done(task);
473 spin_lock_irqsave(&pm8001_ha->lock, flags);
474 } else {
475 task->task_done(task);
476 }
477 rc = -ENODEV;
478 goto err_out;
479 }
480
481 ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
482 if (!ccb) {
483 rc = -SAS_QUEUE_FULL;
484 goto err_out;
485 }
486
487 if (!sas_protocol_ata(task_proto)) {
488 if (task->num_scatter) {
489 n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
490 task->num_scatter, task->data_dir);
491 if (!n_elem) {
492 rc = -ENOMEM;
493 goto err_out_ccb;
494 }
495 }
496 } else {
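/*
 * SATA/STP: the ATA layer has already DMA-mapped the scatterlist,
 * so only record the element count here.
 */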
497 n_elem = task->num_scatter;
498 }
499
500 task->lldd_task = ccb;
501 ccb->n_elem = n_elem;
502
503 atomic_inc(&pm8001_dev->running_req);
504
505 rc = pm8001_deliver_command(pm8001_ha, ccb);
506 if (rc) {
507 atomic_dec(&pm8001_dev->running_req);
508 if (!sas_protocol_ata(task_proto) && n_elem)
509 dma_unmap_sg(pm8001_ha->dev, task->scatter,
510 task->num_scatter, task->data_dir);
511 err_out_ccb:
512 pm8001_ccb_free(pm8001_ha, ccb);
513
514 err_out:
515 pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
516 }
517
518 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
519
520 return rc;
521 }
522
523 /**
524 * pm8001_ccb_task_free - unmap the sg for ssp and smp commands and free the ccb.
525 * @pm8001_ha: our hba card information
526 * @ccb: the ccb to free
527 */
528 void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
529 struct pm8001_ccb_info *ccb)
530 {
531 struct sas_task *task = ccb->task;
532 struct ata_queued_cmd *qc;
533 struct pm8001_device *pm8001_dev;
534
535 if (!task)
536 return;
537
538 if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
539 dma_unmap_sg(pm8001_ha->dev, task->scatter,
540 task->num_scatter, task->data_dir);
541
542 switch (task->task_proto) {
543 case SAS_PROTOCOL_SMP:
544 dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
545 DMA_FROM_DEVICE);
546 dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
547 DMA_TO_DEVICE);
548 break;
549
550 case SAS_PROTOCOL_SATA:
551 case SAS_PROTOCOL_STP:
552 case SAS_PROTOCOL_SSP:
553 default:
554 /* do nothing */
555 break;
556 }
557
558 if (sas_protocol_ata(task->task_proto)) {
559 /* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
560 qc = task->uldd_task;
561 pm8001_dev = ccb->device;
562 trace_pm80xx_request_complete(pm8001_ha->id,
563 pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
564 ccb->ccb_tag, 0 /* ctlr_opcode not known */,
565 qc ? qc->tf.command : 0, // ata opcode
566 pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
567 }
568
569 task->lldd_task = NULL;
570 pm8001_ccb_free(pm8001_ha, ccb);
571 }
572
573 /**
574 * pm8001_alloc_dev - find an unused pm8001_device
575 * @pm8001_ha: our hba card information
576 */
577 static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
578 {
579 u32 dev;
580 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
581 if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
582 pm8001_ha->devices[dev].id = dev;
583 return &pm8001_ha->devices[dev];
584 }
585 }
586 if (dev == PM8001_MAX_DEVICES) {
587 pm8001_dbg(pm8001_ha, FAIL,
588 "max support %d devices, ignore ..\n",
589 PM8001_MAX_DEVICES);
590 }
591 return NULL;
592 }
593 /**
594 * pm8001_find_dev - find a matching pm8001_device
595 * @pm8001_ha: our hba card information
596 * @device_id: device ID to match against
597 */
598 struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
599 u32 device_id)
600 {
601 u32 dev;
602 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
603 if (pm8001_ha->devices[dev].device_id == device_id)
604 return &pm8001_ha->devices[dev];
605 }
606 if (dev == PM8001_MAX_DEVICES) {
607 pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
608 }
609 return NULL;
610 }
611
612 void pm8001_free_dev(struct pm8001_device *pm8001_dev)
613 {
614 u32 id = pm8001_dev->id;
615 memset(pm8001_dev, 0, sizeof(*pm8001_dev));
616 pm8001_dev->id = id;
617 pm8001_dev->dev_type = SAS_PHY_UNUSED;
618 pm8001_dev->device_id = PM8001_MAX_DEVICES;
619 pm8001_dev->sas_device = NULL;
620 }
621
622 /**
623 * pm8001_dev_found_notify - libsas notifies the LLDD that a device was found.
624 * @dev: the device structure used by the sas layer.
625 *
626 * When libsas finds a sas domain device, it tells the LLDD that the
627 * device has been found. The LLDD then registers the device with the HBA
628 * firmware via the "OPC_INB_REG_DEV" command, after which the HBA assigns
629 * a device ID (based on the device's sas address) and returns it to the
630 * LLDD. From then on, we address the device towards the HBA FW by this
631 * device ID rather than by its sas address. This step is required for our
632 * HBA but may be optional for other HBA drivers.
633 */
634 static int pm8001_dev_found_notify(struct domain_device *dev)
635 {
636 unsigned long flags = 0;
637 int res = 0;
638 struct pm8001_hba_info *pm8001_ha = NULL;
639 struct domain_device *parent_dev = dev->parent;
640 struct pm8001_device *pm8001_device;
641 DECLARE_COMPLETION_ONSTACK(completion);
642 u32 flag = 0;
643 pm8001_ha = pm8001_find_ha_by_dev(dev);
644 spin_lock_irqsave(&pm8001_ha->lock, flags);
645
646 pm8001_device = pm8001_alloc_dev(pm8001_ha);
647 if (!pm8001_device) {
648 res = -1;
649 goto found_out;
650 }
651 pm8001_device->sas_device = dev;
652 dev->lldd_dev = pm8001_device;
653 pm8001_device->dev_type = dev->dev_type;
654 pm8001_device->dcompletion = &completion;
655 if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
656 int phy_id;
657
658 phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
659 if (phy_id < 0) {
660 pm8001_dbg(pm8001_ha, FAIL,
661 "Error: no attached dev:%016llx at ex:%016llx.\n",
662 SAS_ADDR(dev->sas_addr),
663 SAS_ADDR(parent_dev->sas_addr));
664 res = phy_id;
665 } else {
666 pm8001_device->attached_phy = phy_id;
667 }
668 } else {
669 if (dev->dev_type == SAS_SATA_DEV) {
670 pm8001_device->attached_phy =
671 dev->rphy->identify.phy_identifier;
672 flag = 1; /* directly sata */
673 }
674 } /* register this device to HBA */
675 pm8001_dbg(pm8001_ha, DISC, "Found device\n");
676 PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
677 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
678 wait_for_completion(&completion);
679 if (dev->dev_type == SAS_END_DEVICE)
680 msleep(50);
681 pm8001_ha->flags = PM8001F_RUN_TIME;
682 return 0;
683 found_out:
684 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
685 return res;
686 }
687
688 int pm8001_dev_found(struct domain_device *dev)
689 {
690 return pm8001_dev_found_notify(dev);
691 }
692
693 #define PM8001_TASK_TIMEOUT 20
694
695 /**
696 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
697 * @dev: the device structure used by the sas layer.
698 */
699 static void pm8001_dev_gone_notify(struct domain_device *dev)
700 {
701 unsigned long flags = 0;
702 struct pm8001_hba_info *pm8001_ha;
703 struct pm8001_device *pm8001_dev = dev->lldd_dev;
704
705 pm8001_ha = pm8001_find_ha_by_dev(dev);
706 spin_lock_irqsave(&pm8001_ha->lock, flags);
707 if (pm8001_dev) {
708 u32 device_id = pm8001_dev->device_id;
709
710 pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
711 pm8001_dev->device_id, pm8001_dev->dev_type);
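/*
 * Abort any I/O still in flight and wait for it to drain before
 * deregistering the device from the firmware.
 */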
712 if (atomic_read(&pm8001_dev->running_req)) {
713 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
714 sas_execute_internal_abort_dev(dev, 0, NULL);
715 while (atomic_read(&pm8001_dev->running_req))
716 msleep(20);
717 spin_lock_irqsave(&pm8001_ha->lock, flags);
718 }
719 PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
720 pm8001_free_dev(pm8001_dev);
721 } else {
722 pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
723 }
724 dev->lldd_dev = NULL;
725 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
726 }
727
728 void pm8001_dev_gone(struct domain_device *dev)
729 {
730 pm8001_dev_gone_notify(dev);
731 }
732
733 /* retry commands by ha, by task and/or by device */
734 void pm8001_open_reject_retry(
735 struct pm8001_hba_info *pm8001_ha,
736 struct sas_task *task_to_close,
737 struct pm8001_device *device_to_close)
738 {
739 int i;
740 unsigned long flags;
741
742 if (pm8001_ha == NULL)
743 return;
744
745 spin_lock_irqsave(&pm8001_ha->lock, flags);
746
747 for (i = 0; i < PM8001_MAX_CCB; i++) {
748 struct sas_task *task;
749 struct task_status_struct *ts;
750 struct pm8001_device *pm8001_dev;
751 unsigned long flags1;
752 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
753
754 if (ccb->ccb_tag == PM8001_INVALID_TAG)
755 continue;
756
757 pm8001_dev = ccb->device;
758 if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
759 continue;
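/*
 * No specific device requested: only touch ccbs whose device
 * pointer lies within this HBA's devices[] array.
 */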
760 if (!device_to_close) {
761 uintptr_t d = (uintptr_t)pm8001_dev
762 - (uintptr_t)&pm8001_ha->devices;
763 if (((d % sizeof(*pm8001_dev)) != 0)
764 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
765 continue;
766 } else if (pm8001_dev != device_to_close)
767 continue;
768 task = ccb->task;
769 if (!task || !task->task_done)
770 continue;
771 if (task_to_close && (task != task_to_close))
772 continue;
773 ts = &task->task_status;
774 ts->resp = SAS_TASK_COMPLETE;
775 /* Force the midlayer to retry */
776 ts->stat = SAS_OPEN_REJECT;
777 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
778 if (pm8001_dev)
779 atomic_dec(&pm8001_dev->running_req);
780 spin_lock_irqsave(&task->task_state_lock, flags1);
781 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
782 task->task_state_flags |= SAS_TASK_STATE_DONE;
783 if (unlikely((task->task_state_flags
784 & SAS_TASK_STATE_ABORTED))) {
785 spin_unlock_irqrestore(&task->task_state_lock,
786 flags1);
787 pm8001_ccb_task_free(pm8001_ha, ccb);
788 } else {
789 spin_unlock_irqrestore(&task->task_state_lock,
790 flags1);
791 pm8001_ccb_task_free(pm8001_ha, ccb);
792 mb();/* in order to force CPU ordering */
793 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
794 task->task_done(task);
795 spin_lock_irqsave(&pm8001_ha->lock, flags);
796 }
797 }
798
799 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
800 }
801
802 /**
803 * pm8001_I_T_nexus_reset() - reset the initiator/target connection
804 * @dev: the device structure for the device to reset.
805 *
806 * Standard mandates link reset for ATA (type 0) and hard reset for
807 * SSP (type 1), only for RECOVERY
808 */
809 int pm8001_I_T_nexus_reset(struct domain_device *dev)
810 {
811 int rc = TMF_RESP_FUNC_FAILED;
812 struct pm8001_device *pm8001_dev;
813 struct pm8001_hba_info *pm8001_ha;
814 struct sas_phy *phy;
815
816 if (!dev || !dev->lldd_dev)
817 return -ENODEV;
818
819 pm8001_dev = dev->lldd_dev;
820 pm8001_ha = pm8001_find_ha_by_dev(dev);
821 phy = sas_get_local_phy(dev);
822
823 if (dev_is_sata(dev)) {
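/* Device sits on a local HBA phy: skip the reset and report success */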
824 if (scsi_is_sas_phy_local(phy)) {
825 rc = 0;
826 goto out;
827 }
828 rc = sas_phy_reset(phy, 1);
829 if (rc) {
830 pm8001_dbg(pm8001_ha, EH,
831 "phy reset failed for device %x\n"
832 "with rc %d\n", pm8001_dev->device_id, rc);
833 rc = TMF_RESP_FUNC_FAILED;
834 goto out;
835 }
836 msleep(2000);
837 rc = sas_execute_internal_abort_dev(dev, 0, NULL);
838 if (rc) {
839 pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
840 "with rc %d\n", pm8001_dev->device_id, rc);
841 rc = TMF_RESP_FUNC_FAILED;
842 }
843 } else {
844 rc = sas_phy_reset(phy, 1);
845 msleep(2000);
846 }
847 pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
848 pm8001_dev->device_id, rc);
849 out:
850 sas_put_local_phy(phy);
851 return rc;
852 }
853
854 /*
855 * This function handles the IT_NEXUS_XXX event or completion
856 * status code for SSP/SATA/SMP I/O requests.
857 */
858 int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
859 {
860 int rc = TMF_RESP_FUNC_FAILED;
861 struct pm8001_device *pm8001_dev;
862 struct pm8001_hba_info *pm8001_ha;
863 struct sas_phy *phy;
864
865 if (!dev || !dev->lldd_dev)
866 return -1;
867
868 pm8001_dev = dev->lldd_dev;
869 pm8001_ha = pm8001_find_ha_by_dev(dev);
870
871 pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");
872
873 phy = sas_get_local_phy(dev);
874
875 if (dev_is_sata(dev)) {
876 DECLARE_COMPLETION_ONSTACK(completion_setstate);
877 if (scsi_is_sas_phy_local(phy)) {
878 rc = 0;
879 goto out;
880 }
881 /* send internal ssp/sata/smp abort command to FW */
882 sas_execute_internal_abort_dev(dev, 0, NULL);
883 msleep(100);
884
885 /* deregister the target device */
886 pm8001_dev_gone_notify(dev);
887 msleep(200);
888
889 /* send phy reset to hard reset the target */
890 rc = sas_phy_reset(phy, 1);
891 msleep(2000);
892 pm8001_dev->setds_completion = &completion_setstate;
893
894 wait_for_completion(&completion_setstate);
895 } else {
896 /* send internal ssp/sata/smp abort command to FW */
897 sas_execute_internal_abort_dev(dev, 0, NULL);
898 msleep(100);
899
900 /* deregister the target device */
901 pm8001_dev_gone_notify(dev);
902 msleep(200);
903
904 /* send phy reset to hard reset the target */
905 rc = sas_phy_reset(phy, 1);
906 msleep(2000);
907 }
908 pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
909 pm8001_dev->device_id, rc);
910 out:
911 sas_put_local_phy(phy);
912
913 return rc;
914 }
915 /* mandatory SAM-3, reset the specified LUN */
916 int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
917 {
918 int rc = TMF_RESP_FUNC_FAILED;
919 struct pm8001_device *pm8001_dev = dev->lldd_dev;
920 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
921 DECLARE_COMPLETION_ONSTACK(completion_setstate);
922
923 if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
924 /*
925 * If the controller is in fatal error state,
926 * we will not get a response from the controller
927 */
928 pm8001_dbg(pm8001_ha, FAIL,
929 "LUN reset failed due to fatal errors\n");
930 return rc;
931 }
932
933 if (dev_is_sata(dev)) {
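/*
 * SATA has no LU RESET TMF: abort outstanding I/O, reset the phy
 * and then bring the device back to the OPERATIONAL state.
 */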
934 struct sas_phy *phy = sas_get_local_phy(dev);
935 sas_execute_internal_abort_dev(dev, 0, NULL);
936 rc = sas_phy_reset(phy, 1);
937 sas_put_local_phy(phy);
938 pm8001_dev->setds_completion = &completion_setstate;
939 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
940 pm8001_dev, DS_OPERATIONAL);
941 wait_for_completion(&completion_setstate);
942 } else {
943 rc = sas_lu_reset(dev, lun);
944 }
945 /* If failed, fall through to I_T_Nexus reset */
946 pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
947 pm8001_dev->device_id, rc);
948 return rc;
949 }
950
951 /* optional SAM-3 */
952 int pm8001_query_task(struct sas_task *task)
953 {
954 u32 tag = 0xdeadbeef;
955 int rc = TMF_RESP_FUNC_FAILED;
956 if (unlikely(!task || !task->lldd_task || !task->dev))
957 return rc;
958
959 if (task->task_proto & SAS_PROTOCOL_SSP) {
960 struct scsi_cmnd *cmnd = task->uldd_task;
961 struct domain_device *dev = task->dev;
962 struct pm8001_hba_info *pm8001_ha =
963 pm8001_find_ha_by_dev(dev);
964
965 rc = pm8001_find_tag(task, &tag);
966 if (rc == 0) {
967 rc = TMF_RESP_FUNC_FAILED;
968 return rc;
969 }
970 pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
971
972 rc = sas_query_task(task, tag);
973 switch (rc) {
974 /* The task is still in Lun, release it then */
975 case TMF_RESP_FUNC_SUCC:
976 pm8001_dbg(pm8001_ha, EH,
977 "The task is still in Lun\n");
978 break;
979 /* The task is not in Lun or failed, reset the phy */
980 case TMF_RESP_FUNC_FAILED:
981 case TMF_RESP_FUNC_COMPLETE:
982 pm8001_dbg(pm8001_ha, EH,
983 "The task is not in Lun or failed, reset the phy\n");
984 break;
985 }
986 }
987 pr_err("pm80xx: rc= %d\n", rc);
988 return rc;
989 }
990
991 /* mandatory SAM-3; still need to free task/ccb info, abort the specified task */
992 int pm8001_abort_task(struct sas_task *task)
993 {
994 struct pm8001_ccb_info *ccb = task->lldd_task;
995 unsigned long flags;
996 u32 tag;
997 struct domain_device *dev ;
998 struct pm8001_hba_info *pm8001_ha;
999 struct pm8001_device *pm8001_dev;
1000 int rc = TMF_RESP_FUNC_FAILED, ret;
1001 u32 phy_id, port_id;
1002 struct sas_task_slow slow_task;
1003
1004 if (!task->lldd_task || !task->dev)
1005 return TMF_RESP_FUNC_FAILED;
1006
1007 dev = task->dev;
1008 pm8001_dev = dev->lldd_dev;
1009 pm8001_ha = pm8001_find_ha_by_dev(dev);
1010 phy_id = pm8001_dev->attached_phy;
1011
1012 if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
1013 // If the controller is seeing fatal errors
1014 // abort task will not get a response from the controller
1015 return TMF_RESP_FUNC_FAILED;
1016 }
1017
1018 ret = pm8001_find_tag(task, &tag);
1019 if (ret == 0) {
1020 pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
1021 return TMF_RESP_FUNC_FAILED;
1022 }
1023 spin_lock_irqsave(&task->task_state_lock, flags);
1024 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1025 spin_unlock_irqrestore(&task->task_state_lock, flags);
1026 return TMF_RESP_FUNC_COMPLETE;
1027 }
1028 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
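/*
 * The abort path below waits on task->slow_task->completion, so give
 * the task a temporary on-stack slow_task if it does not already have
 * one; it is detached again before returning.
 */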
1029 if (task->slow_task == NULL) {
1030 init_completion(&slow_task.completion);
1031 task->slow_task = &slow_task;
1032 }
1033 spin_unlock_irqrestore(&task->task_state_lock, flags);
1034 if (task->task_proto & SAS_PROTOCOL_SSP) {
1035 rc = sas_abort_task(task, tag);
1036 sas_execute_internal_abort_single(dev, tag, 0, NULL);
1037 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1038 task->task_proto & SAS_PROTOCOL_STP) {
1039 if (pm8001_ha->chip_id == chip_8006) {
1040 DECLARE_COMPLETION_ONSTACK(completion_reset);
1041 DECLARE_COMPLETION_ONSTACK(completion);
1042 struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
1043 port_id = phy->port->port_id;
1044
1045 /* 1. Set Device state as Recovery */
1046 pm8001_dev->setds_completion = &completion;
1047 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1048 pm8001_dev, DS_IN_RECOVERY);
1049 wait_for_completion(&completion);
1050
1051 /* 2. Send Phy Control Hard Reset */
1052 reinit_completion(&completion);
1053 phy->port_reset_status = PORT_RESET_TMO;
1054 phy->reset_success = false;
1055 phy->enable_completion = &completion;
1056 phy->reset_completion = &completion_reset;
1057 ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
1058 PHY_HARD_RESET);
1059 if (ret) {
1060 phy->enable_completion = NULL;
1061 phy->reset_completion = NULL;
1062 goto out;
1063 }
1064
1065 /* In the case of the reset timeout/fail we still
1066 * abort the command at the firmware. The assumption
1067 * here is that the drive is off doing something so
1068 * that it's not processing requests, and we want to
1069 * avoid getting a completion for this and either
1070 * leaking the task in libsas or losing the race and
1071 * getting a double free.
1072 */
1073 pm8001_dbg(pm8001_ha, MSG,
1074 "Waiting for local phy ctl\n");
1075 ret = wait_for_completion_timeout(&completion,
1076 PM8001_TASK_TIMEOUT * HZ);
1077 if (!ret || !phy->reset_success) {
1078 phy->enable_completion = NULL;
1079 phy->reset_completion = NULL;
1080 } else {
1081 /* 3. Wait for Port Reset complete or
1082 * Port reset TMO
1083 */
1084 pm8001_dbg(pm8001_ha, MSG,
1085 "Waiting for Port reset\n");
1086 ret = wait_for_completion_timeout(
1087 &completion_reset,
1088 PM8001_TASK_TIMEOUT * HZ);
1089 if (!ret)
1090 phy->reset_completion = NULL;
1091 WARN_ON(phy->port_reset_status ==
1092 PORT_RESET_TMO);
1093 if (phy->port_reset_status == PORT_RESET_TMO) {
1094 pm8001_dev_gone_notify(dev);
1095 PM8001_CHIP_DISP->hw_event_ack_req(
1096 pm8001_ha, 0,
1097 0x07, /*HW_EVENT_PHY_DOWN ack*/
1098 port_id, phy_id, 0, 0);
1099 goto out;
1100 }
1101 }
1102
1103 /*
1104 * 4. SATA Abort ALL
1105 * we wait for the task to be aborted so that the task
1106 * is removed from the ccb. on success the caller is
1107 * going to free the task.
1108 */
1109 ret = sas_execute_internal_abort_dev(dev, 0, NULL);
1110 if (ret)
1111 goto out;
1112 ret = wait_for_completion_timeout(
1113 &task->slow_task->completion,
1114 PM8001_TASK_TIMEOUT * HZ);
1115 if (!ret)
1116 goto out;
1117
1118 /* 5. Set Device State as Operational */
1119 reinit_completion(&completion);
1120 pm8001_dev->setds_completion = &completion;
1121 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1122 pm8001_dev, DS_OPERATIONAL);
1123 wait_for_completion(&completion);
1124 } else {
1125 /*
1126 * Ensure that if we see a completion for the ccb
1127 * associated with the task which we are trying to
1128 * abort then we should not touch the sas_task as it
1129 * may race with libsas freeing it when return here.
1130 */
1131 ccb->task = NULL;
1132 ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
1133 }
1134 rc = TMF_RESP_FUNC_COMPLETE;
1135 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
1136 /* SMP */
1137 rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);
1138
1139 }
1140 out:
1141 spin_lock_irqsave(&task->task_state_lock, flags);
1142 if (task->slow_task == &slow_task)
1143 task->slow_task = NULL;
1144 spin_unlock_irqrestore(&task->task_state_lock, flags);
1145 if (rc != TMF_RESP_FUNC_COMPLETE)
1146 pm8001_info(pm8001_ha, "rc= %d\n", rc);
1147 return rc;
1148 }
1149
1150 int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
1151 {
1152 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1153 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1154
1155 pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
1156 pm8001_dev->device_id);
1157 return sas_clear_task_set(dev, lun);
1158 }
1159
1160 void pm8001_port_formed(struct asd_sas_phy *sas_phy)
1161 {
1162 struct sas_ha_struct *sas_ha = sas_phy->ha;
1163 struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
1164 struct pm8001_phy *phy = sas_phy->lldd_phy;
1165 struct asd_sas_port *sas_port = sas_phy->port;
1166 struct pm8001_port *port = phy->port;
1167
1168 if (!sas_port) {
1169 pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
1170 return;
1171 }
1172 sas_port->lldd_port = port;
1173 }
1174
1175 void pm8001_setds_completion(struct domain_device *dev)
1176 {
1177 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1178 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1179 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1180
1181 if (pm8001_ha->chip_id != chip_8001) {
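/* SPCv/SPCve controllers: explicitly move the device back to DS_OPERATIONAL */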
1182 pm8001_dev->setds_completion = &completion_setstate;
1183 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1184 pm8001_dev, DS_OPERATIONAL);
1185 wait_for_completion(&completion_setstate);
1186 }
1187 }
1188
1189 void pm8001_tmf_aborted(struct sas_task *task)
1190 {
1191 struct pm8001_ccb_info *ccb = task->lldd_task;
1192
1193 if (ccb)
1194 ccb->task = NULL;
1195 }
1196