/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"

/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/* Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (scsi_sg_count(srb) > 0),
 * scsi_sglist(srb) points to a list of s-g entries and scsi_bufflen(srb)
 * is ignored.  For non-scatter-gather transfers, scsi_sglist(srb) points
 * to the transfer buffer itself and scsi_bufflen(srb) is the buffer's
 * length.)
 * Update the *index and *offset variables so that the next copy will
 * pick up from where this one left off. */

unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
	unsigned int *offset, enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly.
	 * Make certain it will fit in the available buffer space. */
	if (scsi_sg_count(srb) == 0) {
		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);
		if (dir == TO_XFER_BUF)
			memcpy((unsigned char *) scsi_sglist(srb) + *offset,
					buffer, cnt);
		else
			memcpy(buffer, (unsigned char *) scsi_sglist(srb) +
					*offset, cnt);
		*offset += cnt;

	/* Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.  If the page is already
	 * in kernel-addressable memory then kmap() will return its address.
	 * If the page is not directly accessible -- such as a user buffer
	 * located in high memory -- then kmap() will map it to a temporary
	 * position in the kernel's virtual address space. */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *) scsi_sglist(srb)
				+ *index;

		/* This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop. */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff =
					(sg->offset + *offset) & (PAGE_SIZE-1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {

				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {

				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/* Transfer the data for all the pages in this
			 * s-g entry.  For each page: call kmap(), do the
			 * transfer, and call kunmap() immediately after. */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}
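
/* Example (hypothetical names, for illustration only): read a response
 * out of the transfer buffer in two pieces.  Because *index and *offset
 * persist across calls, the second call resumes where the first stopped:
 *
 *	unsigned int index = 0, offset = 0;
 *	u8 hdr[8], payload[24];
 *
 *	rtsx_stor_access_xfer_buf(hdr, sizeof(hdr), srb,
 *				  &index, &offset, FROM_XFER_BUF);
 *	rtsx_stor_access_xfer_buf(payload, sizeof(payload), srb,
 *				  &index, &offset, FROM_XFER_BUF);
 */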

/* Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue. */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

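/* Copy the contents of srb's transfer buffer into buffer and set the
 * SCSI residue. */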
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}


/***********************************************************************
 * Transport routines
 ***********************************************************************/

/* Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/* if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto Handle_Errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
			(unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
			sizeof(struct sense_data_t));
	}

	return;

	/* Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset.  If that fails, try a class-specific
	 * device reset. */
Handle_Errors:
	return;
}

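/* Queue one command in the host command buffer.  Each command is packed
 * into a 32-bit little-endian word, as encoded below: bits 31:30 hold
 * cmd_type, bits 29:16 the register address (14 bits), bits 15:8 the
 * mask, and bits 7:0 the data.  Commands that would overflow the buffer
 * are silently dropped.
 */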
void rtsx_add_cmd(struct rtsx_chip *chip,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	u32 *cb = (u32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);

	spin_unlock_irq(&chip->rtsx->reg_lock);
}

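/* Start execution of the queued command buffer without waiting for
 * completion.  As set up below, bit 31 of the control value starts the
 * transfer, bit 30 enables hardware auto response, and bits 23:0 hold
 * the command buffer length in bytes (four bytes per queued command).
 */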
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = 1u << 31;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

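/* Execute the queued command buffer and wait up to timeout milliseconds
 * for the completion to be signalled (presumably by the driver's
 * interrupt path, which completes rtsx->done).  Returns 0 on success,
 * -ETIMEDOUT if no completion arrived in time, or -EIO if the chip
 * reported failure.
 */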
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = 1u << 31;
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		rtsx_trace(chip);
		goto finish_send_cmd;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

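/* Append one entry to the host scatter-gather descriptor table.  Each
 * descriptor is a 64-bit word, packed as below: the DMA address in the
 * upper 32 bits, the length in bits 31:12, and option flags in the low
 * 12 bits.  Segments larger than 0x80000 bytes are split into multiple
 * descriptors, with SG_END kept only on the final one.
 */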
static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	u64 *sgb = (u64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8  temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}

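/* ADMA transfer that starts part-way through a scatterlist and stops
 * after size bytes.  *index and *offset name the starting s-g entry and
 * the byte offset within it; both are updated so that a subsequent call
 * resumes where this one stopped.
 */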
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg, unsigned int *index,
		unsigned int *offset, int size,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/* Usually the next entry will be @sg@ + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array. So here we use sg_next to move to
	 * the proper sg
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n",
			*index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		if ((i == (sg_cnt - 1)) || !resid)
			option = SG_VALID | SG_END | SG_TRANS_DATA;
		else
			option = SG_VALID | SG_TRANS_DATA;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

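/* ADMA transfer of a whole scatterlist.  The host descriptor table holds
 * at most HOST_SG_TBL_BUF_LEN / 8 entries, so the list is processed in
 * rounds: each round builds one table, triggers the DMA, and waits for
 * it to finish before starting the next.
 */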
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if ((sg == NULL) || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	sg_ptr = sg;

	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8)) {
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
			/* nothing left when buf_cnt is an exact multiple */
			if (sg_cnt == 0)
				break;
		} else {
			sg_cnt = HOST_SG_TBL_BUF_LEN / 8;
		}

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
				(unsigned int)addr, len);

			if (j == (sg_cnt - 1))
				option = SG_VALID | SG_END | SG_TRANS_DATA;
			else
				option = SG_VALID | SG_TRANS_DATA;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);

		/* sg_ptr was already advanced sg_cnt times by sg_next()
		 * in the loop above, so it must not be advanced again
		 * here (doing so would skip entries in the next round). */
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

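/* DMA a single contiguous buffer to or from the device.  The buffer is
 * mapped with dma_map_single(); the transfer length occupies the low
 * 24 bits of the control value written to RTSX_HDBCTLR.
 */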
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
		size_t len, enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = 1u << 31;
	long timeleft;

	if ((buf == NULL) || (len <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir);
	/* mapping failures are reported via dma_mapping_error(), not by
	 * a NULL dma_addr_t */
	if (dma_mapping_error(&rtsx->pci->dev, addr))
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

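/* Transfer data to/from the device, resuming at *index/*offset.  When
 * use_sg is nonzero, buf is really a scatterlist pointer and use_sg is
 * the number of entries; otherwise buf is a plain buffer of length len.
 * On any error, a detected delink is cleared and every card slot is
 * marked for re-initialization.
 */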
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
		void *buf, size_t len, int use_sg, unsigned int *index,
		unsigned int *offset, enum dma_data_direction dma_dir,
		int timeout)
{
	int err = 0;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg)
		err = rtsx_transfer_sglist_adma_partial(chip, card,
				(struct scatterlist *)buf, use_sg,
				index, offset, (int)len, dma_dir, timeout);
	else
		err = rtsx_transfer_buf(chip, card,
					buf, len, dma_dir, timeout);
	if (err < 0 && RTSX_TST_DELINK(chip)) {
		RTSX_CLR_DELINK(chip);
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 1);
	}

	return err;
}

int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma(chip, card,
				(struct scatterlist *)buf,
				use_sg, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
	}

	if (err < 0 && RTSX_TST_DELINK(chip)) {
		RTSX_CLR_DELINK(chip);
		chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
		rtsx_reinit_cards(chip, 1);
	}

	return err;
}