/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"
#include "rtsx_scsi.h"
#include "rtsx_transport.h"
#include "rtsx_chip.h"
#include "rtsx_card.h"

/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/* Copy a buffer of length buflen to/from the srb's transfer buffer.
 * (Note: for scatter-gather transfers (scsi_sg_count(srb) > 0),
 * scsi_sglist(srb) points to a list of s-g entries and we ignore
 * scsi_bufflen(srb).  For non-scatter-gather transfers, scsi_sglist(srb)
 * points to the transfer buffer itself and scsi_bufflen(srb) is the
 * buffer's length.)  Update the *index and *offset variables so that
 * the next copy will pick up from where this one left off.
 */

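/*
 * A minimal usage sketch: stream a response out in two chunks while
 * *index/*offset keep the resume position ("hdr", "payload" and "plen"
 * are illustrative names, not part of this driver):
 *
 *	unsigned int index = 0, offset = 0;
 *
 *	rtsx_stor_access_xfer_buf(hdr, 8, srb, &index, &offset, TO_XFER_BUF);
 *	rtsx_stor_access_xfer_buf(payload, plen, srb, &index, &offset,
 *				  TO_XFER_BUF);
 */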
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
	unsigned int *offset, enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly.
	 * Make certain it will fit in the available buffer space. */
	if (scsi_sg_count(srb) == 0) {
		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);
		if (dir == TO_XFER_BUF)
			memcpy((unsigned char *)scsi_sglist(srb) + *offset,
			       buffer, cnt);
		else
			memcpy(buffer, (unsigned char *)scsi_sglist(srb) +
			       *offset, cnt);
		*offset += cnt;

	/* Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages, and
	 * each page has to be kmap()'ed separately.  If the page is already
	 * in kernel-addressable memory then kmap() will return its address.
	 * If the page is not directly accessible -- such as a user buffer
	 * located in high memory -- then kmap() will map it to a temporary
	 * position in the kernel's virtual address space. */
	} else {
		struct scatterlist *sg =
			(struct scatterlist *)scsi_sglist(srb) + *index;

		/* This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *offset and *index values for the next loop. */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
				((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff =
				(sg->offset + *offset) & (PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			/* Transfer the data for all the pages in this
			 * s-g entry.  For each page: call kmap(), do the
			 * transfer, and call kunmap() immediately after. */
			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}

/* Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue.
 */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

/* Fetch the contents of srb's transfer buffer into buffer and set the
 * SCSI residue.
 */
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
	unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
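
/*
 * A hedged usage sketch: a command handler would typically build its
 * response in a local buffer and hand it to rtsx_stor_set_xfer_buf(),
 * which also sets the residue ("data" is an illustrative name):
 *
 *	unsigned char data[8];
 *
 *	memset(data, 0, sizeof(data));
 *	data[0] = ...;
 *	rtsx_stor_set_xfer_buf(data, sizeof(data), srb);
 */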

/***********************************************************************
 * Transport routines
 ***********************************************************************/

/* Invoke the transport and basic error-handling/recovery methods
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/* if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto Handle_Errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == TRANSPORT_ERROR) {
		dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
		       (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
		       sizeof(struct sense_data_t));
	}

	return;

	/* Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset.  If that fails, try a class-specific
	 * device reset. */
Handle_Errors:
	return;
}

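/*
 * Queue one register command into the host command buffer.  Each entry is
 * a little-endian 32-bit word; from the shifts and masks below its layout
 * is (an inference from this code, not a datasheet):
 *
 *	bits 31:30	cmd_type (2 bits)
 *	bits 29:16	reg_addr (14 bits)
 *	bits 15:8	mask
 *	bits  7:0	data
 */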
void rtsx_add_cmd(struct rtsx_chip *chip,
		  u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	u32 *cb = (u32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);

	spin_unlock_irq(&chip->rtsx->reg_lock);
}

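/*
 * Trigger execution of the queued command buffer without waiting for
 * completion.  From the code below, the RTSX_HCBCTLR value packs bit 31 =
 * start transfer, bit 30 = hardware auto response, and bits 23:0 = command
 * buffer length in bytes (chip->ci entries of 4 bytes each); this is an
 * inference from the shifts and masks, not a datasheet.
 */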
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = 1 << 31;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = 1 << 31;
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		TRACE_GOTO(chip, finish_send_cmd);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

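/*
 * Append one descriptor to the host scatter-gather table.  Each entry is a
 * little-endian 64-bit word packing the DMA address into bits 63:32, the
 * length into the bits above the option flags (length << 12), and the
 * SG_* option flags into the low bits -- inferred from the packing below,
 * not from a datasheet.  Segments larger than 0x80000 bytes are split
 * across several entries, and SG_END is kept only on the final split entry.
 */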
static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	u64 *sgb = (u64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}

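/*
 * ADMA transfer over part of a scatterlist.  *index and *offset record
 * where the previous partial transfer stopped and are updated so that the
 * next call resumes there -- the same convention rtsx_stor_access_xfer_buf()
 * uses above.  A minimal sketch of the resume pattern ("nblocks" and
 * "blk_len" are illustrative names, not part of this driver):
 *
 *	unsigned int idx = 0, off = 0;
 *	int i, err = 0;
 *
 *	for (i = 0; i < nblocks && !err; i++)
 *		err = rtsx_transfer_sglist_adma_partial(chip, SD_CARD, sg,
 *				num_sg, &idx, &off, blk_len,
 *				DMA_FROM_DEVICE, 2000);
 */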
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg, unsigned int *index,
		unsigned int *offset, int size,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/* Usually the next entry will be sg + 1, but if this sg element
	 * is part of a chained scatterlist, it could jump to the start of
	 * a new scatterlist array.  So here we use sg_next() to move to
	 * the proper sg.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n",
			*index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			/* Transfer ends within this sg entry */
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			/* Transfer continues into the next sg entry */
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		if ((i == (sg_cnt - 1)) || !resid)
			option = SG_VALID | SG_END | SG_TRANS_DATA;
		else
			option = SG_VALID | SG_TRANS_DATA;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

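/*
 * ADMA transfer over a whole scatterlist.  The host descriptor table holds
 * at most HOST_SG_TBL_BUF_LEN / 8 entries, so longer lists are programmed
 * and triggered batch by batch; within each batch only the last entry
 * carries SG_END.
 */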
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
		struct scatterlist *sg, int num_sg,
		enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if ((sg == NULL) || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	sg_ptr = sg;

	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = HOST_SG_TBL_BUF_LEN / 8;

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
				(unsigned int)addr, len);

			if (j == (sg_cnt - 1))
				option = SG_VALID | SG_END | SG_TRANS_DATA;
			else
				option = SG_VALID | SG_TRANS_DATA;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			/* sg_next() already leaves sg_ptr at the first
			 * entry of the next batch when this loop ends, so
			 * no extra advance is needed afterwards. */
			sg_ptr = sg_next(sg_ptr);
		}

		dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);
	}

	/* Wait for TRANS_OK_INT */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, timeout * HZ / 1000);
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

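/*
 * DMA transfer of a single contiguous kernel buffer.  From the code below,
 * the RTSX_HDBCTLR value packs bit 31 = start transfer, bit 29 = direction,
 * and bits 23:0 = transfer length in bytes (an inference from the shifts
 * and masks, not a datasheet).
 */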
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
		size_t len, enum dma_data_direction dma_dir, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = 1 << 31;
	long timeleft;

	if ((buf == NULL) || (len <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
	if (dma_mapping_error(&rtsx->pci->dev, addr))
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, timeout * HZ / 1000);
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

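/*
 * Public entry point for partial transfers.  When use_sg is non-zero, buf
 * actually points to a scatterlist and use_sg is the number of entries;
 * otherwise buf is a plain kernel buffer of len bytes.
 */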
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
		void *buf, size_t len, int use_sg, unsigned int *index,
		unsigned int *offset, enum dma_data_direction dma_dir,
		int timeout)
{
	int err = 0;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma_partial(chip, card,
				(struct scatterlist *)buf, use_sg,
				index, offset, (int)len, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir,
					timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}

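/*
 * Public entry point for whole transfers.  A hedged usage sketch, with
 * "sg", "num_sg" and "total_len" as illustrative names and a 2000 ms
 * timeout:
 *
 *	err = rtsx_transfer_data(chip, SD_CARD, (void *)sg, total_len,
 *				 num_sg, DMA_FROM_DEVICE, 2000);
 */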
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma(chip, card,
				(struct scatterlist *)buf,
				use_sg, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir,
					timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}