/*
 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/slab.h>
#include "pm8001_sas.h"

/**
 * pm8001_find_tag - from a sas task, find the tag that belongs to this task
 * @task: the task sent to the LLDD
 * @tag: the found tag associated with the task
 */
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct pm8001_ccb_info *ccb;
		ccb = task->lldd_task;
		*tag = ccb->ccb_tag;
		return 1;
	}
	return 0;
}

/**
 * pm8001_tag_free - free a tag that is no longer needed
 * @pm8001_ha: our hba struct
 * @tag: the found tag associated with the task
 */
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
	void *bitmap = pm8001_ha->tags;
	clear_bit(tag, bitmap);
}

/**
 * pm8001_tag_alloc - allocate an empty tag for a task to use.
 * @pm8001_ha: our hba struct
 * @tag_out: the found empty tag.
 */
inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
	unsigned int tag;
	void *bitmap = pm8001_ha->tags;
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
	if (tag >= pm8001_ha->tags_num) {
		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	set_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
	*tag_out = tag;
	return 0;
}

void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	for (i = 0; i < pm8001_ha->tags_num; ++i)
		pm8001_tag_free(pm8001_ha, i);
}
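
/*
 * Illustrative sketch (not part of the driver): a caller typically pairs
 * pm8001_tag_alloc() with pm8001_tag_free() around the lifetime of a ccb,
 * roughly:
 *
 *	u32 tag;
 *
 *	if (pm8001_tag_alloc(pm8001_ha, &tag))
 *		return -SAS_QUEUE_FULL;
 *	ccb = &pm8001_ha->ccb_info[tag];
 *	... use the ccb, submit the request ...
 *	pm8001_tag_free(pm8001_ha, tag);
 *
 * pm8001_task_exec() below follows exactly this pattern.
 */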

/**
 * pm8001_mem_alloc - allocate memory for pm8001.
 * @pdev: pci device.
 * @virt_addr: the allocated virtual address
 * @pphys_addr: the allocated DMA (bus) address
 * @pphys_addr_hi: the physical address high byte address.
 * @pphys_addr_lo: the physical address low byte address.
 * @mem_size: memory size.
 * @align: required DMA address alignment, in bytes (0 for none).
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	if (align)
		align_offset = (dma_addr_t)align - 1;
	mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align,
					       &mem_dma_handle);
	if (!mem_virt_alloc) {
		pm8001_printk("memory allocation error\n");
		return -1;
	}
	*pphys_addr = mem_dma_handle;
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}
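
/*
 * Note on the alignment handling above: the buffer is over-allocated by
 * "align" bytes, the returned DMA address is rounded up to the next "align"
 * boundary (align - 1 is used as a mask, so a power-of-two alignment is
 * assumed), and the virtual pointer is advanced by the same offset so that
 * *virt_addr and phys_align refer to the same byte. For example, with
 * align = 0x100 and a DMA handle of 0x1234, phys_align becomes 0x1300 and
 * the virtual pointer is advanced by 0xcc bytes.
 */
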
/**
 * pm8001_find_ha_by_dev - from a domain device (which comes from the sas
 * layer), find our hba struct.
 * @dev: the domain device which comes from the sas layer.
 */
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
{
	struct sas_ha_struct *sha = dev->port->ha;
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	return pm8001_ha;
}

/**
 * pm8001_phy_control - this function should be registered in the
 * sas_domain_function_template for libsas to use. Note: it only controls
 * the HBA phys, not expander phys; to control an expander phy, use an SMP
 * command instead.
 * @sas_phy: which phy in the HBA phys.
 * @func: the operation.
 * @funcdata: always NULL.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	pm8001_ha->phy[phy_id].enable_completion = &completion;
	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		if (pm8001_ha->phy[phy_id].phy_state == 0) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == 0) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == 0) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			uint32_t *qp = (uint32_t *)(((char *)
				pm8001_ha->io_mem[2].memvirtaddr)
				+ 0x1034 + (0x4000 * (phy_id & 3)));

			phy->invalid_dword_count = qp[0];
			phy->running_disparity_error_count = qp[1];
			phy->loss_of_dword_sync_count = qp[3];
			phy->phy_reset_problem_count = qp[4];
		}
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		rc = -EOPNOTSUPP;
	}
	msleep(300);
	return rc;
}
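
/*
 * A note on PHY_FUNC_GET_EVENTS above: on the 8001 chip the phy error
 * counters live behind a shiftable BAR4 window, so the window is first
 * moved (0x30000 when phy_id < 4, 0x40000 otherwise), the counters are
 * read from io_mem[2] at offset 0x1034 + 0x4000 * (phy_id & 3), and the
 * window is shifted back to 0 before the lock is dropped.
 */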

/**
 * pm8001_scan_start - enable all HBA phys by sending a phy_start
 * command to the HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	pm8001_ha = sha->lldd_ha;
	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
}

int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	sas_drain_work(ha);
	return 1;
}
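
/*
 * The SCSI midlayer calls pm8001_scan_start() once during host scan and
 * then polls pm8001_scan_finished() with the elapsed time in jiffies until
 * it returns nonzero; the time < HZ check above therefore gives the phy-up
 * events roughly one second to arrive before discovery work is drained.
 */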

/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to smp task
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}

u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;
	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ ||
		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}

/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to sata task
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to TM
 * @tmf: the task management IU
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
{
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}

/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to ssp task
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}

/* Find the local port id that's attached to this device */
static int sas_find_local_port_id(struct domain_device *dev)
{
	struct domain_device *pdev = dev->parent;

	/* Directly attached device */
	if (!pdev)
		return dev->port->id;
	while (pdev) {
		struct domain_device *pdev_p = pdev->parent;
		if (!pdev_p)
			return pdev->port->id;
		pdev = pdev->parent;
	}
	return 0;
}
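
/*
 * In other words: for a directly attached device the device's own port id
 * is used, while for a device behind one or more expanders the parent
 * chain is walked up to the expander that is attached to the HBA and that
 * expander's port id is returned.
 */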

/**
 * pm8001_task_exec - queue the task (ssp, smp or ata) to the hardware.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags.
 * @is_tmf: whether this is a task management task.
 * @tmf: the task management IU
 *
 * If can_queue is greater than 1, tasks can be queued up; SMP tasks are
 * always executed one at a time.
 */
#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
static int pm8001_task_exec(struct sas_task *task,
	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	struct pm8001_port *port = NULL;
	struct sas_task *t = task;
	struct pm8001_ccb_info *ccb;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	unsigned long flags = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &t->task_status;
		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			t->task_done(t);
		return 0;
	}
	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
	if (pm8001_ha->controller_fatal_error) {
		struct task_status_struct *ts = &t->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		t->task_done(t);
		return 0;
	}
	PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	do {
		dev = t->dev;
		pm8001_dev = dev->lldd_dev;
		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
		if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
			if (sas_protocol_ata(t->task_proto)) {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;

				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				t->task_done(t);
				spin_lock_irqsave(&pm8001_ha->lock, flags);
				continue;
			} else {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				continue;
			}
		}
		rc = pm8001_tag_alloc(pm8001_ha, &tag);
		if (rc)
			goto err_out;
		ccb = &pm8001_ha->ccb_info[tag];

		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = dma_map_sg(pm8001_ha->dev,
					t->scatter,
					t->num_scatter,
					t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out_tag;
				}
			}
		} else {
			n_elem = t->num_scatter;
		}

		t->lldd_task = ccb;
		ccb->n_elem = n_elem;
		ccb->ccb_tag = tag;
		ccb->task = t;
		ccb->device = pm8001_dev;
		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SSP:
			if (is_tmf)
				rc = pm8001_task_prep_ssp_tm(pm8001_ha,
					ccb, tmf);
			else
				rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
			break;
		default:
			dev_printk(KERN_ERR, pm8001_ha->dev,
				"unknown sas_task proto: 0x%x\n",
				t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc) {
			PM8001_IO_DBG(pm8001_ha,
				pm8001_printk("rc is %x\n", rc));
			goto err_out_tag;
		}
		/* TODO: select normal or high priority */
		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);
		pm8001_dev->running_req++;
	} while (0);
	rc = 0;
	goto out_done;

err_out_tag:
	pm8001_tag_free(pm8001_ha, tag);
err_out:
	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(t->task_proto))
		if (n_elem)
			dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
				t->data_dir);
out_done:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return rc;
}
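
/*
 * The per-task flow above, in short: allocate a tag, bind the ccb at
 * ccb_info[tag] to the task, DMA-map the scatterlist for non-ATA
 * protocols, dispatch to the protocol-specific prep routine, and on any
 * failure unwind (free the tag, then unmap the scatterlist) before
 * returning.
 */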

/**
 * pm8001_queue_command - registered for the upper layer; all IO commands
 * sent to the HBA come through this interface.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags
 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return pm8001_task_exec(task, gfp_flags, 0, NULL);
}

/**
 * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
 * @pm8001_ha: our hba card information
 * @task: the task to be freed.
 * @ccb: the ccb which attached to ssp task
 * @ccb_idx: ccb index.
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
{
	if (!ccb->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (ccb->n_elem)
			dma_unmap_sg(pm8001_ha->dev, task->scatter,
				task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			PCI_DMA_FROMDEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}
	task->lldd_task = NULL;
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	ccb->open_retry = 0;
	pm8001_tag_free(pm8001_ha, ccb_idx);
}
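
/*
 * pm8001_ccb_task_free() is the completion-side counterpart of
 * pm8001_task_exec(): it undoes the scatterlist mapping and ccb/task
 * binding set up there, marks the ccb tag as 0xFFFFFFFF (invalid) and
 * returns the tag to the bitmap via pm8001_tag_free().
 */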

/**
 * pm8001_alloc_dev - find an empty pm8001_device
 * @pm8001_ha: our hba card information
 */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
			pm8001_ha->devices[dev].id = dev;
			return &pm8001_ha->devices[dev];
		}
	}
	if (dev == PM8001_MAX_DEVICES) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("max support %d devices, ignore ..\n",
			PM8001_MAX_DEVICES));
	}
	return NULL;
}
/**
 * pm8001_find_dev - find a matching pm8001_device
 * @pm8001_ha: our hba card information
 * @device_id: the device ID assigned by the HBA firmware
 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
					u32 device_id)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].device_id == device_id)
			return &pm8001_ha->devices[dev];
	}
	if (dev == PM8001_MAX_DEVICES) {
		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
				"DEVICE FOUND !!!\n"));
	}
	return NULL;
}

static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
	u32 id = pm8001_dev->id;
	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
	pm8001_dev->id = id;
	pm8001_dev->dev_type = SAS_PHY_UNUSED;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	pm8001_dev->sas_device = NULL;
}

/**
 * pm8001_dev_found_notify - libsas notifies us that a device was found.
 * @dev: the device structure which the sas layer uses.
 *
 * When libsas finds a sas domain device, it tells the LLDD that the device
 * was found, and the LLDD then registers this device with the HBA firmware
 * via the "OPC_INB_REG_DEV" command. After that, the HBA assigns a device
 * ID (according to the device's sas address) and returns it to the LLDD.
 * From then on, we communicate with the HBA firmware using the device ID
 * the HBA assigned rather than the sas address. This step is required for
 * our HBA, although it is optional for other HBA drivers.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_id;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
		phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr)
				== SAS_ADDR(dev->sas_addr)) {
				pm8001_device->attached_phy = phy_id;
				break;
			}
		}
		if (phy_id == parent_dev->ex_dev.num_phys) {
			PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Error: no attached dev:%016llx"
			" at ex:%016llx.\n", SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr)));
			res = -1;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly attached sata */
		}
	} /* register this device to HBA */
	PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}

int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}

void pm8001_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void pm8001_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}

#define PM8001_TASK_TIMEOUT 20
/**
 * pm8001_exec_internal_tmf_task - execute some task management commands.
 * @dev: the wanted device.
 * @tmf: which task management function is to be taken.
 * @para_len: para_len.
 * @parameter: ssp task parameter.
 *
 * When errors or exceptions happen, we may want to do something, for
 * example abort the issued task which resulted in this exception; that is
 * done by calling this function. Note it shares the task execute interface.
 */
static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = pm8001_task_done;
		task->slow_task->timer.function = pm8001_tmf_timedout;
		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Executing internal task "
				"failed\n"));
			goto ex_err;
		}
		wait_for_completion(&task->slow_task->completion);
		if (pm8001_ha->chip_id != chip_8001) {
			pm8001_dev->setds_completion = &completion_setstate;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x01);
			wait_for_completion(&completion_setstate);
		}
		res = -TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				PM8001_FAIL_DBG(pm8001_ha,
					pm8001_printk("TMF task[%x]timeout.\n",
					tmf->tmf));
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Blocked task error.\n"));
			res = -EMSGSIZE;
			break;
		} else {
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk(" Task to dev %016llx response:"
				"0x%x status 0x%x\n",
				SAS_ADDR(dev->sas_addr),
				task->task_status.resp,
				task->task_status.stat));
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}
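
/*
 * Notes on the loop above: the TMF is retried up to three times. A retry
 * only happens when the firmware returned an unexpected response/status
 * pair; success, underrun, overrun and timeout all leave the loop on the
 * first pass. The slow_task timer arms pm8001_tmf_timedout(), which marks
 * the task SAS_TASK_STATE_ABORTED and completes it, so a timeout is
 * detected as "aborted but not done".
 */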

static int
pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
	u32 task_tag)
{
	int res, retry;
	u32 ccb_tag;
	struct pm8001_ccb_info *ccb;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		task->task_done = pm8001_task_done;
		task->slow_task->timer.function = pm8001_tmf_timedout;
		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
		if (res)
			goto ex_err;
		ccb = &pm8001_ha->ccb_info[ccb_tag];
		ccb->device = pm8001_dev;
		ccb->ccb_tag = ccb_tag;
		ccb->task = task;
		ccb->n_elem = 0;

		res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
			pm8001_dev, flag, task_tag, ccb_tag);

		if (res) {
			del_timer(&task->slow_task->timer);
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Executing internal task "
				"failed\n"));
			goto ex_err;
		}
		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				PM8001_FAIL_DBG(pm8001_ha,
					pm8001_printk("TMF task timeout.\n"));
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;

		} else {
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk(" Task to dev %016llx response: "
					"0x%x status 0x%x\n",
				SAS_ADDR(dev->sas_addr),
				task->task_status.resp,
				task->task_status.stat));
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}

/**
 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
 * @dev: the device structure which the sas layer uses.
 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		PM8001_DISC_DBG(pm8001_ha,
			pm8001_printk("found dev[%d:%x] is gone.\n",
			pm8001_dev->device_id, pm8001_dev->dev_type));
		if (pm8001_dev->running_req) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
				dev, 1, 0);
			while (pm8001_dev->running_req)
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
		pm8001_free_dev(pm8001_dev);
	} else {
		PM8001_DISC_DBG(pm8001_ha,
			pm8001_printk("Found dev has gone.\n"));
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}

static int pm8001_issue_ssp_tmf(struct domain_device *dev,
	u8 *lun, struct pm8001_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	strncpy((u8 *)&ssp_task.LUN, lun, 8);
	return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
		tmf);
}

/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		u32 tag;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
		if (!device_to_close) {
			uintptr_t d = (uintptr_t)pm8001_dev
					- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		tag = ccb->ccb_tag;
		if (!tag || (tag == 0xFFFFFFFF))
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
			mb();/* in order to force CPU ordering */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
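
/*
 * About the !device_to_close branch above: when no specific device is
 * requested, every ccb is considered, but the ccb->device pointer is first
 * sanity-checked by confirming it points at an element of the
 * pm8001_ha->devices[] array (offset divisible by the element size and
 * index below PM8001_MAX_DEVICES) before its tasks are completed with
 * SAS_OPEN_REJECT / SAS_OREJ_RSVD_RETRY so the midlayer retries them.
 */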

/* The standard mandates a link reset for ATA (type 0) and a hard reset for
 * SSP (type 1), only for RECOVERY.
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			PM8001_EH_DBG(pm8001_ha,
			pm8001_printk("phy reset failed for device %x\n"
			"with rc %d\n", pm8001_dev->device_id, rc));
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		if (rc) {
			PM8001_EH_DBG(pm8001_ha,
			pm8001_printk("task abort failed %x\n"
			"with rc %d\n", pm8001_dev->device_id, rc));
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
out:
	sas_put_local_phy(phy);
	return rc;
}

/*
 * This function handles the IT_NEXUS_XXX event or completion
 * status code for SSP/SATA/SMP I/O requests.
 */
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;
	u32 device_id = 0;

	if (!dev || !dev->lldd_dev)
		return -1;

	pm8001_dev = dev->lldd_dev;
	device_id = pm8001_dev->device_id;
	pm8001_ha = pm8001_find_ha_by_dev(dev);

	PM8001_EH_DBG(pm8001_ha,
			pm8001_printk("I_T_Nexus handler invoked !!"));

	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		DECLARE_COMPLETION_ONSTACK(completion_setstate);
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		/* send internal ssp/sata/smp abort command to FW */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
							dev, 1, 0);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
		pm8001_dev->setds_completion = &completion_setstate;

		wait_for_completion(&completion_setstate);
	} else {
		/* send internal ssp/sata/smp abort command to FW */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
							dev, 1, 0);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
out:
	sas_put_local_phy(phy);

	return rc;
}
/* mandatory SAM-3; the task resets the specified LUN */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);
	if (dev_is_sata(dev)) {
		struct sas_phy *phy = sas_get_local_phy(dev);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, 0x01);
		wait_for_completion(&completion_setstate);
	} else {
		tmf_task.tmf = TMF_LU_RESET;
		rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
	}
	/* If failed, fall-through I_T_Nexus reset */
	PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
	return rc;
}
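
/*
 * A SATA device cannot be sent an SSP LU_RESET TMF (pm8001_issue_ssp_tmf()
 * requires SAS_PROTOCOL_SSP), so the SATA path above instead aborts
 * outstanding commands, resets the local phy, and sets the firmware device
 * state back to operational (0x01); the SSP path issues TMF_LU_RESET.
 */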

/* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;
	int i = 0;
	struct scsi_lun lun;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
		for (i = 0; i < 16; i++)
			printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
		printk(KERN_INFO "]\n");
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("The task is still in Lun\n"));
			break;
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			PM8001_EH_DBG(pm8001_ha,
			pm8001_printk("The task is not in Lun or failed,"
			" reset the phy\n"));
			break;
		}
	}
	pm8001_printk(":rc= %d\n", rc);
	return rc;
}

/* mandatory SAM-3, still need to free task/ccb info, abort the specified task */
int pm8001_abort_task(struct sas_task *task)
{
	unsigned long flags;
	u32 tag;
	u32 device_id;
	struct domain_device *dev;
	struct pm8001_hba_info *pm8001_ha;
	struct scsi_lun lun;
	struct pm8001_device *pm8001_dev;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 phy_id;
	struct sas_task_slow slow_task;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return TMF_RESP_FUNC_FAILED;
	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	device_id = pm8001_dev->device_id;
	phy_id = pm8001_dev->attached_phy;
	ret = pm8001_find_tag(task, &tag);
	if (ret == 0) {
		pm8001_printk("no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;
		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;

			/* 1. Set Device state as Recovery */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x03);
			wait_for_completion(&completion);

			/* 2. Send Phy Control Hard Reset */
			reinit_completion(&completion);
			phy->port_reset_status = PORT_RESET_TMO;
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
				goto out;
			}

			/* In the case of the reset timeout/fail we still
			 * abort the command at the firmware. The assumption
			 * here is that the drive is off doing something so
			 * that it's not processing requests, and we want to
			 * avoid getting a completion for this and either
			 * leaking the task in libsas or losing the race and
			 * getting a double free.
			 */
			PM8001_MSG_DBG(pm8001_ha,
				pm8001_printk("Waiting for local phy ctl\n"));
			ret = wait_for_completion_timeout(&completion,
					PM8001_TASK_TIMEOUT * HZ);
			if (!ret || !phy->reset_success) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
			} else {
				/* 3. Wait for Port Reset complete or
				 * Port reset TMO
				 */
				PM8001_MSG_DBG(pm8001_ha,
				pm8001_printk("Waiting for Port reset\n"));
				ret = wait_for_completion_timeout(
					&completion_reset,
					PM8001_TASK_TIMEOUT * HZ);
				if (!ret)
					phy->reset_completion = NULL;
				WARN_ON(phy->port_reset_status ==
						PORT_RESET_TMO);
				if (phy->port_reset_status == PORT_RESET_TMO) {
					pm8001_dev_gone_notify(dev);
					goto out;
				}
			}

			/*
			 * 4. SATA Abort ALL
			 * we wait for the task to be aborted so that the task
			 * is removed from the ccb. on success the caller is
			 * going to free the task.
			 */
			ret = pm8001_exec_internal_task_abort(pm8001_ha,
				pm8001_dev, pm8001_dev->sas_device, 1, tag);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set Device State as Operational */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x01);
			wait_for_completion(&completion);
		} else {
			rc = pm8001_exec_internal_task_abort(pm8001_ha,
				pm8001_dev, pm8001_dev->sas_device, 0, tag);
		}
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);

	}
out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_printk("rc= %d\n", rc);
	return rc;
}

int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
	return rc;
}

int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);

	PM8001_EH_DBG(pm8001_ha,
		pm8001_printk("I_T_L_Q clear task set[%x]\n",
		pm8001_dev->device_id));
	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
	return rc;
}