// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"

/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/*
 * Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (scsi_sg_count(srb) > 0),
 * scsi_sglist(srb) points to a list of s-g entries and we ignore
 * scsi_bufflen(srb).  For non-scatter-gather transfers, scsi_sglist(srb)
 * points to the transfer buffer itself and scsi_bufflen(srb) is the
 * buffer's length.)
 * Update the *index and *offset variables so that the next copy will
 * pick up from where this one left off.
 */

unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
				       unsigned int buflen,
				       struct scsi_cmnd *srb,
				       unsigned int *index,
				       unsigned int *offset,
				       enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly. */
	if (scsi_sg_count(srb) == 0) {
		unsigned char *sgbuffer;

		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);

		sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset;

		if (dir == TO_XFER_BUF)
			memcpy(sgbuffer, buffer, cnt);
		else
			memcpy(buffer, sgbuffer, cnt);
		*offset += cnt;

	/*
	 * Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.
	 */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *)scsi_sglist(srb)
				+ *index;

		/*
		 * This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop.
		 */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff = (sg->offset + *offset) &
					    (PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}
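
/*
 * A minimal usage sketch (not part of the driver): streaming a response
 * to the SCSI midlayer in fixed-size chunks.  The routine keeps its
 * position in *index/*offset between calls, so a large buffer can be
 * copied piecewise.  The function name and chunk size are hypothetical.
 *
 *	static void example_stream_to_srb(struct scsi_cmnd *srb,
 *					  unsigned char *data,
 *					  unsigned int total)
 *	{
 *		unsigned int index = 0, offset = 0, done = 0;
 *
 *		while (done < total) {
 *			unsigned int copied;
 *
 *			copied = rtsx_stor_access_xfer_buf(data + done,
 *						min(total - done, 512U),
 *						srb, &index, &offset,
 *						TO_XFER_BUF);
 *			if (copied == 0)
 *				break;
 *			done += copied;
 *		}
 *	}
 */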

/*
 * Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue.
 */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

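/*
 * Fetch the contents of srb's transfer buffer into buffer and set the
 * SCSI residue.
 */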
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
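
/*
 * Illustrative call (hypothetical response buffer): returning a fixed
 * 36-byte payload for an INQUIRY-style command.  The helper clamps the
 * copy to the transfer buffer and records any residue for the midlayer.
 *
 *	unsigned char resp[36] = { 0 };
 *
 *	rtsx_stor_set_xfer_buf(resp, sizeof(resp), srb);
 */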

/***********************************************************************
 * Transport routines
 ***********************************************************************/

/*
 * Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/*
	 * If the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing.
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		return;
	}

	/* If there is a transport error, reset and don't auto-sense. */
	if (result == TRANSPORT_ERROR) {
		dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		return;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* Set the result so the higher layers expect this data. */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
		       (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
		       sizeof(struct sense_data_t));
	}
}

void rtsx_add_cmd(struct rtsx_chip *chip,
		  u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	__le32 *cb = (__le32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);

	spin_unlock_irq(&chip->rtsx->reg_lock);
}
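
/*
 * Sketch of the 32-bit host command word rtsx_add_cmd() builds, read
 * off the shifts and masks above (field names are descriptive, not
 * datasheet terms):
 *
 *	bits 31..30	cmd_type (2 bits)
 *	bits 29..16	reg_addr (14-bit register address)
 *	bits 15..8	mask
 *	bits  7..0	data
 */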

void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = BIT(31);

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = BIT(31);
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* Set up data structures for the wakeup system. */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
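
/*
 * A hedged usage sketch: queueing two register operations and firing
 * the batch.  rtsx_init_cmd() is assumed (per rtsx_chip.h) to rewind
 * chip->ci; CARD_STOP is a placeholder register address.
 *
 *	int retval;
 *
 *	rtsx_init_cmd(chip);
 *	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_STOP, 0xFF, 0x00);
 *	rtsx_add_cmd(chip, CHECK_REG_CMD, CARD_STOP, 0x0F, 0x00);
 *	retval = rtsx_send_cmd(chip, SD_CARD, 100);
 *
 * On failure rtsx_send_cmd() returns -ETIMEDOUT or -EIO and has
 * already called rtsx_stop_cmd().
 */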

static inline void rtsx_add_sg_tbl(struct rtsx_chip *chip,
				   u32 addr, u32 len, u8 option)
{
	__le64 *sgb = (__le64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8  temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~RTSX_SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}
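
/*
 * Layout of the 64-bit descriptor rtsx_add_sg_tbl() emits, read off
 * the shifts above (field names are descriptive, not datasheet terms).
 * Entries longer than 0x80000 bytes are split into several descriptors,
 * with RTSX_SG_END kept only on the last piece:
 *
 *	bits 63..32	DMA address
 *	bits 31..12	length in bytes (at most 0x80000 per descriptor)
 *	bits 11..0	option flags (RTSX_SG_VALID, RTSX_SG_TRANS_DATA,
 *			RTSX_SG_END)
 */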

static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
					     struct scatterlist *sg, int num_sg,
					     unsigned int *index,
					     unsigned int *offset, int size,
					     enum dma_data_direction dma_dir,
					     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if (!sg || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* Set up data structures for the wakeup system. */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/*
	 * Usually the next entry will be sg + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array.  So here we use sg_next() to move to
	 * the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n",
			*index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
		if ((i == sg_cnt - 1) || !resid)
			option |= RTSX_SG_END;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
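
/*
 * A minimal sketch (hypothetical caller) using the
 * rtsx_transfer_data_partial() wrapper below: *index and *offset
 * persist across calls, so a transfer can resume mid-scatterlist
 * without rebuilding the list.  The second call picks up where the
 * first stopped.
 *
 *	unsigned int index = 0, offset = 0;
 *	int err;
 *
 *	err = rtsx_transfer_data_partial(chip, SD_CARD, scsi_sglist(srb),
 *					 4096, scsi_sg_count(srb), &index,
 *					 &offset, DMA_FROM_DEVICE, 100);
 *	if (!err)
 *		err = rtsx_transfer_data_partial(chip, SD_CARD,
 *						 scsi_sglist(srb), 4096,
 *						 scsi_sg_count(srb), &index,
 *						 &offset, DMA_FROM_DEVICE,
 *						 100);
 */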

static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
				     struct scatterlist *sg, int num_sg,
				     enum dma_data_direction dma_dir,
				     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if (!sg || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* Set up data structures for the wakeup system. */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	sg_ptr = sg;

	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = HOST_SG_TBL_BUF_LEN / 8;

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
				(unsigned int)addr, len);

			option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
			if (j == (sg_cnt - 1))
				option |= RTSX_SG_END;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(&trans_done,
								     msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
			     size_t len, enum dma_data_direction dma_dir,
			     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = BIT(31);
	long timeleft;

	if (!buf || (len <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
	if (dma_mapping_error(&rtsx->pci->dev, addr))
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* Set up data structures for the wakeup system. */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(&trans_done,
							     msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}
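
/*
 * A minimal sketch (hypothetical caller): transferring one DMA-safe,
 * heap-allocated buffer to the card.  A stack buffer must not be passed
 * here because the buffer goes through dma_map_single().
 *
 *	u8 *cmd_buf = kzalloc(64, GFP_KERNEL);
 *	int err;
 *
 *	if (!cmd_buf)
 *		return -ENOMEM;
 *	err = rtsx_transfer_buf(chip, SD_CARD, cmd_buf, 64,
 *				DMA_TO_DEVICE, 100);
 *	kfree(cmd_buf);
 */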

int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
			       void *buf, size_t len, int use_sg,
			       unsigned int *index, unsigned int *offset,
			       enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	/* Don't transfer data during abort processing. */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		struct scatterlist *sg = buf;

		err = rtsx_transfer_sglist_adma_partial(chip, card, sg, use_sg,
							index, offset, (int)len,
							dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card,
					buf, len, dma_dir, timeout);
	}
	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}

int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		       int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg);

	/* Don't transfer data during abort processing. */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma(chip, card, buf,
						use_sg, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}
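
/*
 * A hedged top-level example: a scatter-gather read for a SCSI command,
 * the way card handlers would typically drive this API.  With a
 * non-zero use_sg, buf is treated as a scatterlist and the s-g path
 * ignores len.
 *
 *	int err = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
 *				     scsi_bufflen(srb), scsi_sg_count(srb),
 *				     DMA_FROM_DEVICE, 10000);
 */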