// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   Wei WANG <wei_wang@realsil.com.cn>
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>

#include "rtsx_pcr.h"

static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "Enable MSI");

static DEFINE_IDR(rtsx_pci_idr);
static DEFINE_SPINLOCK(rtsx_pci_lock);

static struct mfd_cell rtsx_pcr_cells[] = {
	[RTSX_SD_CARD] = {
		.name = DRV_NAME_RTSX_PCI_SDMMC,
	},
	[RTSX_MS_CARD] = {
		.name = DRV_NAME_RTSX_PCI_MS,
	},
};

static const struct pci_device_id rtsx_pci_ids[] = {
	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);

static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
{
	rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
			0xFC, pcr->aspm_en);
}

static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
{
	rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
			0xFC, 0);
}

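/*
 * Program the LTR (Latency Tolerance Reporting) latency: the 32-bit value
 * is written one byte at a time into MSGTXDATA0..3, least significant byte
 * first, and LTR_CTL is then set to transmit it in software-controlled mode.
 */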
static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
	rtsx_pci_write_register(pcr, MSGTXDATA0,
			MASK_8_BIT_DEF, (u8) (latency & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA1,
			MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA2,
			MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA3,
			MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);

	return 0;
}

int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
	if (pcr->ops->set_ltr_latency)
		return pcr->ops->set_ltr_latency(pcr, latency);
	else
		return rtsx_comm_set_ltr_latency(pcr, latency);
}

static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
	struct rtsx_cr_option *option = &pcr->option;

	if (pcr->aspm_enabled == enable)
		return;

	if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
		if (enable)
			rtsx_pci_enable_aspm(pcr);
		else
			rtsx_pci_disable_aspm(pcr);
	} else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
		u8 mask = FORCE_ASPM_VAL_MASK;
		u8 val = 0;

		if (enable)
			val = pcr->aspm_en;
		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
	}

	pcr->aspm_enabled = enable;
}

static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
{
	if (pcr->ops->set_aspm)
		pcr->ops->set_aspm(pcr, false);
	else
		rtsx_comm_set_aspm(pcr, false);
}

int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
{
	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);

	return 0;
}

static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
{
	if (pcr->ops->set_l1off_cfg_sub_d0)
		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
}

static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &pcr->option;

	rtsx_disable_aspm(pcr);

	if (option->ltr_enabled)
		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);

	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
}

static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
{
	if (pcr->ops->full_on)
		pcr->ops->full_on(pcr);
	else
		rtsx_comm_pm_full_on(pcr);
}

void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
	/* If pci device removed, don't queue idle work any more */
	if (pcr->remove_pci)
		return;

	if (pcr->state != PDEV_STAT_RUN) {
		pcr->state = PDEV_STAT_RUN;
		if (pcr->ops->enable_auto_blink)
			pcr->ops->enable_auto_blink(pcr);
		rtsx_pm_full_on(pcr);
	}

	mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
}
EXPORT_SYMBOL_GPL(rtsx_pci_start_run);

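/*
 * Internal registers are accessed through the HAIMR window: a 32-bit word
 * carrying the 14-bit register address, mask and data is written to
 * RTSX_HAIMR, and the register is then polled (up to MAX_RW_REG_CNT times)
 * until the HAIMR_TRANS_END bit clears, indicating the access completed.
 */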
int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
{
	int i;
	u32 val = HAIMR_WRITE_START;

	val |= (u32)(addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0) {
			if (data != (u8)val)
				return -EIO;
			return 0;
		}
	}

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_register);

int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
{
	u32 val = HAIMR_READ_START;
	int i;

	val |= (u32)(addr & 0x3FFF) << 16;
	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0)
			break;
	}

	if (i >= MAX_RW_REG_CNT)
		return -ETIMEDOUT;

	if (data)
		*data = (u8)(val & 0xFF);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_register);

int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	int err, i, finished = 0;
	u8 tmp;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	return 0;
}

int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	if (pcr->ops->write_phy)
		return pcr->ops->write_phy(pcr, addr, val);

	return __rtsx_pci_write_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);

int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	int err, i, finished = 0;
	u16 data;
	u8 *ptr, tmp;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
	rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	ptr = rtsx_pci_get_cmd_data(pcr);
	data = ((u16)ptr[1] << 8) | ptr[0];

	if (val)
		*val = data;

	return 0;
}

int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	if (pcr->ops->read_phy)
		return pcr->ops->read_phy(pcr, addr, val);

	return __rtsx_pci_read_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);

void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
	if (pcr->ops->stop_cmd)
		return pcr->ops->stop_cmd(pcr);

	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);

	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
}
EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);

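/*
 * Queue one register access in the host command buffer. Each entry is a
 * little-endian 32-bit word: bits 31:30 hold the command type, bits 29:16
 * the register address, bits 15:8 the mask and bits 7:0 the data.
 */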
void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	unsigned long flags;
	u32 val = 0;
	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irqsave(&pcr->lock, flags);
	ptr += pcr->ci;
	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
		put_unaligned_le32(val, ptr);
		ptr++;
		pcr->ci++;
	}
	spin_unlock_irqrestore(&pcr->lock, flags);
}
EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);

void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
{
	u32 val = 1 << 31;

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);

int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
{
	struct completion trans_done;
	u32 val = 1 << 31;
	long timeleft;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcr->lock, flags);

	/* set up data structures for the wakeup system */
	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL)
		err = -EINVAL;
	else if (pcr->trans_result == TRANS_RESULT_OK)
		err = 0;
	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

finish_send_cmd:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);

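/*
 * Append one descriptor to the ADMA scatter-gather table. Each entry is a
 * little-endian 64-bit word: the DMA address in the upper 32 bits, the
 * transfer length in bits 31:12, and the valid/trans-data/end option flags
 * in the low bits.
 */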
static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
		dma_addr_t addr, unsigned int len, int end)
{
	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
	u64 val;
	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);

	if (end)
		option |= RTSX_SG_END;
	val = ((u64)addr << 32) | ((u64)len << 12) | option;

	put_unaligned_le64(val, ptr);
	pcr->sgi++;
}

int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read, int timeout)
{
	int err = 0, count;

	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
	if (count < 1)
		return -EINVAL;
	pcr_dbg(pcr, "DMA mapping count: %d\n", count);

	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);

	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);

int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (pcr->remove_pci)
		return -EINVAL;

	if ((sglist == NULL) || (num_sg <= 0))
		return -EINVAL;

	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);

void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);

int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int count, bool read, int timeout)
{
	struct completion trans_done;
	struct scatterlist *sg;
	dma_addr_t addr;
	long timeleft;
	unsigned long flags;
	unsigned int len;
	int i, err = 0;
	u32 val;
	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;

	if (pcr->remove_pci)
		return -ENODEV;

	if ((sglist == NULL) || (count < 1))
		return -EINVAL;

	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
	pcr->sgi = 0;
	for_each_sg(sglist, sg, count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
	}

	spin_lock_irqsave(&pcr->lock, flags);

	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL) {
		err = -EILSEQ;
		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
			pcr->dma_error_count++;
	} else if (pcr->trans_result == TRANS_NO_DEVICE) {
		err = -ENODEV;
	}
	spin_unlock_irqrestore(&pcr->lock, flags);

out:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);

int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;

		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
		ptr += 256;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);

int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);

static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
{
	rtsx_pci_init_cmd(pcr);

	while (*tbl & 0xFFFF0000) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
		tbl++;
	}

	return rtsx_pci_send_cmd(pcr, 100);
}

int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_enable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_enable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);

int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_disable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_disable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);

static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
{
	struct rtsx_hw_param *hw_param = &pcr->hw_param;

	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
		| hw_param->interrupt_en;

	if (pcr->num_slots > 1)
		pcr->bier |= MS_INT_EN;

	/* Enable Bus Interrupt */
	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);

	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
}

static inline u8 double_ssc_depth(u8 depth)
{
	return ((depth > 1) ? (depth - 1) : depth);
}

static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
{
	if (div > CLK_DIV_1) {
		if (ssc_depth > (div - 1))
			ssc_depth -= (div - 1);
		else
			ssc_depth = SSC_DEPTH_4M;
	}

	return ssc_depth;
}

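/*
 * Switch the card clock. The SSC divider value n is derived from the target
 * clock (clk - 2, or via the chip-specific conv_clk_and_div_n hook) and is
 * doubled together with the CLK_DIV setting until it reaches MIN_DIV_N_PCR;
 * the SSC depth is then adjusted to match the chosen divider.
 */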
int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
	int err, clk;
	u8 n, clk_divider, mcu_cnt, div;
	static const u8 depth[] = {
		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
	};

	if (initial_mode) {
		/* Use a card clock of around 250 kHz in the initial stage */
		clk_divider = SD_CLK_DIVIDE_128;
		card_clock = 30000000;
	} else {
		clk_divider = SD_CLK_DIVIDE_0;
	}
	err = rtsx_pci_write_register(pcr, SD_CFG1,
			SD_CLK_DIVIDE_MASK, clk_divider);
	if (err < 0)
		return err;

	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
	if (card_clock == UHS_SDR104_MAX_DTR &&
			pcr->dma_error_count &&
			PCI_PID(pcr) == RTS5227_DEVICE_ID)
		card_clock = UHS_SDR104_MAX_DTR -
			(pcr->dma_error_count * 20000000);

	card_clock /= 1000000;
	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);

	clk = card_clock;
	if (!initial_mode && double_clk)
		clk = card_clock * 2;
	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
		clk, pcr->cur_clock);

	if (clk == pcr->cur_clock)
		return 0;

	if (pcr->ops->conv_clk_and_div_n)
		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
	else
		n = (u8)(clk - 2);
	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
		return -EINVAL;

	mcu_cnt = (u8)(125/clk + 3);
	if (mcu_cnt > 15)
		mcu_cnt = 15;

	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
	div = CLK_DIV_1;
	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
		if (pcr->ops->conv_clk_and_div_n) {
			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
					DIV_N_TO_CLK) * 2;
			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
					CLK_TO_DIV_N);
		} else {
			n = (n + 2) * 2 - 2;
		}
		div++;
	}
	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);

	ssc_depth = depth[ssc_depth];
	if (double_clk)
		ssc_depth = double_ssc_depth(ssc_depth);

	ssc_depth = revise_ssc_depth(ssc_depth, div);
	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);

	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
			CLK_LOW_FREQ, CLK_LOW_FREQ);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
			0xFF, (div << 4) | mcu_cnt);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
			SSC_DEPTH_MASK, ssc_depth);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
	if (vpclk) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, 0);
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, PHASE_NOT_RESET);
	}

	err = rtsx_pci_send_cmd(pcr, 2000);
	if (err < 0)
		return err;

	/* Wait SSC clock stable */
	udelay(SSC_CLOCK_STABLE_WAIT);
	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
	if (err < 0)
		return err;

	pcr->cur_clock = clk;
	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);

int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_on)
		return pcr->ops->card_power_on(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);

int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_off)
		return pcr->ops->card_power_off(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);

int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
{
	static const unsigned int cd_mask[] = {
		[RTSX_SD_CARD] = SD_EXIST,
		[RTSX_MS_CARD] = MS_EXIST
	};

	if (!(pcr->flags & PCR_MS_PMOS)) {
		/* When using single PMOS, accessing card is not permitted
		 * if the existing card is not the designated one.
		 */
		if (pcr->card_exist & (~cd_mask[card]))
			return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);

int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
	if (pcr->ops->switch_output_voltage)
		return pcr->ops->switch_output_voltage(pcr, voltage);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);

unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
{
	unsigned int val;

	val = rtsx_pci_readl(pcr, RTSX_BIPR);
	if (pcr->ops->cd_deglitch)
		val = pcr->ops->cd_deglitch(pcr);

	return val;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);

void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
{
	struct completion finish;

	pcr->finish_me = &finish;
	init_completion(&finish);

	if (pcr->done)
		complete(pcr->done);

	if (!pcr->remove_pci)
		rtsx_pci_stop_cmd(pcr);

	wait_for_completion_interruptible_timeout(&finish,
			msecs_to_jiffies(2));
	pcr->finish_me = NULL;
}
EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);

static void rtsx_pci_card_detect(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct rtsx_pcr *pcr;
	unsigned long flags;
	unsigned int card_detect = 0, card_inserted, card_removed;
	u32 irq_status;

	dwork = to_delayed_work(work);
	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);
	spin_lock_irqsave(&pcr->lock, flags);

	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);

	irq_status &= CARD_EXIST;
	card_inserted = pcr->card_inserted & irq_status;
	card_removed = pcr->card_removed;
	pcr->card_inserted = 0;
	pcr->card_removed = 0;

	spin_unlock_irqrestore(&pcr->lock, flags);

	if (card_inserted || card_removed) {
		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
			card_inserted, card_removed);

		if (pcr->ops->cd_deglitch)
			card_inserted = pcr->ops->cd_deglitch(pcr);

		card_detect = card_inserted | card_removed;

		pcr->card_exist |= card_inserted;
		pcr->card_exist &= ~card_removed;
	}

	mutex_unlock(&pcr->pcr_mutex);

	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
		pcr->slots[RTSX_MS_CARD].card_event(
				pcr->slots[RTSX_MS_CARD].p_dev);
}

static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
{
	if (pcr->ops->process_ocp) {
		pcr->ops->process_ocp(pcr);
	} else {
		if (!pcr->option.ocp_en)
			return;
		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
			rtsx_pci_clear_ocpstat(pcr);
			pcr->ocp_stat = 0;
		}
	}
}

static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
{
	if (pcr->option.ocp_en)
		rtsx_pci_process_ocp(pcr);

	return 0;
}

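/*
 * Interrupt handler: read and clear RTSX_BIPR, record SD/MS card insertion
 * or removal, complete any pending command or DMA transfer, and schedule
 * the card-detect work when a card event was seen.
 */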
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
	struct rtsx_pcr *pcr = dev_id;
	u32 int_reg;

	if (!pcr)
		return IRQ_NONE;

	spin_lock(&pcr->lock);

	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
	/* Clear interrupt flag */
	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
	if ((int_reg & pcr->bier) == 0) {
		spin_unlock(&pcr->lock);
		return IRQ_NONE;
	}
	if (int_reg == 0xFFFFFFFF) {
		spin_unlock(&pcr->lock);
		return IRQ_HANDLED;
	}

	int_reg &= (pcr->bier | 0x7FFFFF);

	if (int_reg & SD_OC_INT)
		rtsx_pci_process_ocp_interrupt(pcr);

	if (int_reg & SD_INT) {
		if (int_reg & SD_EXIST) {
			pcr->card_inserted |= SD_EXIST;
		} else {
			pcr->card_removed |= SD_EXIST;
			pcr->card_inserted &= ~SD_EXIST;
		}
		pcr->dma_error_count = 0;
	}

	if (int_reg & MS_INT) {
		if (int_reg & MS_EXIST) {
			pcr->card_inserted |= MS_EXIST;
		} else {
			pcr->card_removed |= MS_EXIST;
			pcr->card_inserted &= ~MS_EXIST;
		}
	}

	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
			pcr->trans_result = TRANS_RESULT_FAIL;
			if (pcr->done)
				complete(pcr->done);
		} else if (int_reg & TRANS_OK_INT) {
			pcr->trans_result = TRANS_RESULT_OK;
			if (pcr->done)
				complete(pcr->done);
		}
	}

	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
		schedule_delayed_work(&pcr->carddet_work,
				msecs_to_jiffies(200));

	spin_unlock(&pcr->lock);
	return IRQ_HANDLED;
}

static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
{
	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
			__func__, pcr->msi_en, pcr->pci->irq);

	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
			pcr->msi_en ? 0 : IRQF_SHARED,
			DRV_NAME_RTSX_PCI, pcr)) {
		dev_err(&(pcr->pci->dev),
			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
			pcr->pci->irq);
		return -1;
	}

	pcr->irq = pcr->pci->irq;
	pci_intx(pcr->pci, !pcr->msi_en);

	return 0;
}

static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
{
	if (pcr->ops->set_aspm)
		pcr->ops->set_aspm(pcr, true);
	else
		rtsx_comm_set_aspm(pcr, true);
}

static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &pcr->option;

	if (option->ltr_enabled) {
		u32 latency = option->ltr_l1off_latency;

		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
			mdelay(option->l1_snooze_delay);

		rtsx_set_ltr_latency(pcr, latency);
	}

	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 0);

	rtsx_enable_aspm(pcr);
}

static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
{
	if (pcr->ops->power_saving)
		pcr->ops->power_saving(pcr);
	else
		rtsx_comm_pm_power_saving(pcr);
}

static void rtsx_pci_idle_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);

	pcr->state = PDEV_STAT_IDLE;

	if (pcr->ops->disable_auto_blink)
		pcr->ops->disable_auto_blink(pcr);
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pm_power_saving(pcr);

	mutex_unlock(&pcr->pcr_mutex);
}

#ifdef CONFIG_PM
static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;

	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);

	if (pcr->ops->force_power_down)
		pcr->ops->force_power_down(pcr, pm_state);
}
#endif

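/* Over-current protection (OCP) helpers for the SD card power rail */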
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;

	if (pcr->ops->enable_ocp) {
		pcr->ops->enable_ocp(pcr);
	} else {
		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
	}
}

void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
{
	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;

	if (pcr->ops->disable_ocp) {
		pcr->ops->disable_ocp(pcr);
	} else {
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
				OC_POWER_DOWN);
	}
}

void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
{
	if (pcr->ops->init_ocp) {
		pcr->ops->init_ocp(pcr);
	} else {
		struct rtsx_cr_option *option = &(pcr->option);

		if (option->ocp_en) {
			u8 val = option->sd_800mA_ocp_thd;

			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
			rtsx_pci_write_register(pcr, REG_OCPPARA1,
				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
			rtsx_pci_write_register(pcr, REG_OCPPARA2,
				SD_OCP_THD_MASK, val);
			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
			rtsx_pci_enable_ocp(pcr);
		} else {
			/* OC power down */
			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
				OC_POWER_DOWN);
		}
	}
}

int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
{
	if (pcr->ops->get_ocpstat)
		return pcr->ops->get_ocpstat(pcr, val);
	else
		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
}

void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
{
	if (pcr->ops->clear_ocpstat) {
		pcr->ops->clear_ocpstat(pcr);
	} else {
		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;

		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
		udelay(100);
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
	}
}

int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);
	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);

	msleep(50);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);

	return 0;
}

int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);

	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);

	return 0;
}

static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
	int err;

	pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	rtsx_pci_enable_bus_int(pcr);

	/* Power on SSC */
	err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
	if (err < 0)
		return err;

	/* Wait SSC power stable */
	udelay(200);

	rtsx_pci_disable_aspm(pcr);
	if (pcr->ops->optimize_phy) {
		err = pcr->ops->optimize_phy(pcr);
		if (err < 0)
			return err;
	}

	rtsx_pci_init_cmd(pcr);

	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
	/* Disable card clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
	/* Reset delink mode */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
	/* Card driving select */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
			0xFF, pcr->card_drive_sel);
	/* Enable SSC Clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
			0xFF, SSC_8X_EN | SSC_SEL_4M);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
	/* Disable cd_pwr_save */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
	/* Clear Link Ready Interrupt */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
			LINK_RDY_INT, LINK_RDY_INT);
	/* Enlarge the estimation window of PERST# glitch
	 * to reduce the chance of invalid card interrupt
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
	/* Update RC oscillator to 400k
	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
	 *                1: 2M  0: 400k
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
	/* Set interrupt write clear
	 * bit 1: U_elbi_if_rd_clr_en
	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	switch (PCI_PID(pcr)) {
	case PID_5250:
	case PID_524A:
	case PID_525A:
	case PID_5260:
		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
		break;
	default:
		break;
	}

	/* Init OCP */
	rtsx_pci_init_ocp(pcr);

	/* Enable clk_request_n to enable clock power management */
	rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
	/* Enter L1 when host tx idle */
	rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);

	if (pcr->ops->extra_init_hw) {
		err = pcr->ops->extra_init_hw(pcr);
		if (err < 0)
			return err;
	}

	/* No CD interrupt if probing driver with card inserted.
	 * So we need to initialize pcr->card_exist here.
	 */
	if (pcr->ops->cd_deglitch)
		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
	else
		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;

	return 0;
}

static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
	int err;

	spin_lock_init(&pcr->lock);
	mutex_init(&pcr->pcr_mutex);

	switch (PCI_PID(pcr)) {
	default:
	case 0x5209:
		rts5209_init_params(pcr);
		break;

	case 0x5229:
		rts5229_init_params(pcr);
		break;

	case 0x5289:
		rtl8411_init_params(pcr);
		break;

	case 0x5227:
		rts5227_init_params(pcr);
		break;

	case 0x522A:
		rts522a_init_params(pcr);
		break;

	case 0x5249:
		rts5249_init_params(pcr);
		break;

	case 0x524A:
		rts524a_init_params(pcr);
		break;

	case 0x525A:
		rts525a_init_params(pcr);
		break;

	case 0x5287:
		rtl8411b_init_params(pcr);
		break;

	case 0x5286:
		rtl8402_init_params(pcr);
		break;

	case 0x5260:
		rts5260_init_params(pcr);
		break;
	}

	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
			PCI_PID(pcr), pcr->ic_version);

	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
			GFP_KERNEL);
	if (!pcr->slots)
		return -ENOMEM;

	if (pcr->ops->fetch_vendor_settings)
		pcr->ops->fetch_vendor_settings(pcr);

	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
			pcr->sd30_drive_sel_1v8);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
			pcr->sd30_drive_sel_3v3);
	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
			pcr->card_drive_sel);
	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);

	pcr->state = PDEV_STAT_IDLE;
	err = rtsx_pci_init_hw(pcr);
	if (err < 0) {
		kfree(pcr->slots);
		return err;
	}

	return 0;
}

static int rtsx_pci_probe(struct pci_dev *pcidev,
			  const struct pci_device_id *id)
{
	struct rtsx_pcr *pcr;
	struct pcr_handle *handle;
	u32 base, len;
	int ret, i, bar = 0;

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)pcidev->revision);

	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
	if (ret)
		goto disable;

	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr) {
		ret = -ENOMEM;
		goto release_pci;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto free_pcr;
	}
	handle->pcr = pcr;

	idr_preload(GFP_KERNEL);
	spin_lock(&rtsx_pci_lock);
	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		pcr->id = ret;
	spin_unlock(&rtsx_pci_lock);
	idr_preload_end();
	if (ret < 0)
		goto free_handle;

	pcr->pci = pcidev;
	dev_set_drvdata(&pcidev->dev, handle);

	if (CHK_PCI_PID(pcr, 0x525A))
		bar = 1;
	len = pci_resource_len(pcidev, bar);
	base = pci_resource_start(pcidev, bar);
	pcr->remap_addr = ioremap_nocache(base, len);
	if (!pcr->remap_addr) {
		ret = -ENOMEM;
		goto free_handle;
	}

	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
			GFP_KERNEL);
	if (pcr->rtsx_resv_buf == NULL) {
		ret = -ENXIO;
		goto unmap;
	}
	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	pcr->card_inserted = 0;
	pcr->card_removed = 0;
	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
	INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);

	pcr->msi_en = msi_en;
	if (pcr->msi_en) {
		ret = pci_enable_msi(pcidev);
		if (ret)
			pcr->msi_en = false;
	}

	ret = rtsx_pci_acquire_irq(pcr);
	if (ret < 0)
		goto disable_msi;

	pci_set_master(pcidev);
	synchronize_irq(pcr->irq);

	ret = rtsx_pci_init_chip(pcr);
	if (ret < 0)
		goto disable_irq;

	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
		rtsx_pcr_cells[i].platform_data = handle;
		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
	}
	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
	if (ret < 0)
		goto disable_irq;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

	return 0;

disable_irq:
	free_irq(pcr->irq, (void *)pcr);
disable_msi:
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
	iounmap(pcr->remap_addr);
free_handle:
	kfree(handle);
free_pcr:
	kfree(pcr);
release_pci:
	pci_release_regions(pcidev);
disable:
	pci_disable_device(pcidev);

	return ret;
}

static void rtsx_pci_remove(struct pci_dev *pcidev)
{
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	pcr->remove_pci = true;

	/* Disable interrupts at the pcr level */
	spin_lock_irq(&pcr->lock);
	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;
	spin_unlock_irq(&pcr->lock);

	cancel_delayed_work_sync(&pcr->carddet_work);
	cancel_delayed_work_sync(&pcr->idle_work);

	mfd_remove_devices(&pcidev->dev);

	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	iounmap(pcr->remap_addr);

	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);

	kfree(pcr->slots);
	kfree(pcr);
	kfree(handle);

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}

#ifdef CONFIG_PM

static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	cancel_delayed_work(&pcr->carddet_work);
	cancel_delayed_work(&pcr->idle_work);

	mutex_lock(&pcr->pcr_mutex);

	rtsx_pci_power_off(pcr, HOST_ENTER_S3);

	pci_save_state(pcidev);
	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
	pci_disable_device(pcidev);
	pci_set_power_state(pcidev, pci_choose_state(pcidev, state));

	mutex_unlock(&pcr->pcr_mutex);
	return 0;
}

static int rtsx_pci_resume(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	mutex_lock(&pcr->pcr_mutex);

	pci_set_power_state(pcidev, PCI_D0);
	pci_restore_state(pcidev);
	ret = pci_enable_device(pcidev);
	if (ret)
		goto out;
	pci_set_master(pcidev);

	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	if (ret)
		goto out;

	ret = rtsx_pci_init_hw(pcr);
	if (ret)
		goto out;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

out:
	mutex_unlock(&pcr->pcr_mutex);
	return ret;
}

static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;
	rtsx_pci_power_off(pcr, HOST_ENTER_S1);

	pci_disable_device(pcidev);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
}

#else /* CONFIG_PM */

#define rtsx_pci_suspend NULL
#define rtsx_pci_resume NULL
#define rtsx_pci_shutdown NULL

#endif /* CONFIG_PM */

static struct pci_driver rtsx_pci_driver = {
	.name = DRV_NAME_RTSX_PCI,
	.id_table = rtsx_pci_ids,
	.probe = rtsx_pci_probe,
	.remove = rtsx_pci_remove,
	.suspend = rtsx_pci_suspend,
	.resume = rtsx_pci_resume,
	.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");