1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Driver for Realtek PCI-Express card reader
3  *
4  * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
5  *
6  * Author:
7  *   Wei WANG <wei_wang@realsil.com.cn>
8  */
9 
10 #include <linux/pci.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/highmem.h>
15 #include <linux/interrupt.h>
16 #include <linux/delay.h>
17 #include <linux/idr.h>
18 #include <linux/platform_device.h>
19 #include <linux/mfd/core.h>
20 #include <linux/rtsx_pci.h>
21 #include <linux/mmc/card.h>
22 #include <asm/unaligned.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 
26 #include "rtsx_pcr.h"
27 #include "rts5261.h"
28 #include "rts5228.h"
29 
30 static bool msi_en = true;
31 module_param(msi_en, bool, S_IRUGO | S_IWUSR);
32 MODULE_PARM_DESC(msi_en, "Enable MSI");
33 
34 static DEFINE_IDR(rtsx_pci_idr);
35 static DEFINE_SPINLOCK(rtsx_pci_lock);
36 
37 static struct mfd_cell rtsx_pcr_cells[] = {
38 	[RTSX_SD_CARD] = {
39 		.name = DRV_NAME_RTSX_PCI_SDMMC,
40 	},
41 };
42 
43 static const struct pci_device_id rtsx_pci_ids[] = {
44 	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
45 	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
46 	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
47 	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
48 	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
49 	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
50 	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
51 	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
52 	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
53 	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
54 	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
55 	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 	{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 	{ 0, }
58 };
59 
60 MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
61 
62 static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
63 {
64 	rtsx_pci_write_register(pcr, MSGTXDATA0,
65 				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
66 	rtsx_pci_write_register(pcr, MSGTXDATA1,
67 				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
68 	rtsx_pci_write_register(pcr, MSGTXDATA2,
69 				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
70 	rtsx_pci_write_register(pcr, MSGTXDATA3,
71 				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
72 	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
73 		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
74 
75 	return 0;
76 }
77 
78 int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
79 {
80 	return rtsx_comm_set_ltr_latency(pcr, latency);
81 }
82 
83 static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
84 {
85 	if (pcr->aspm_enabled == enable)
86 		return;
87 
88 	if (pcr->aspm_mode == ASPM_MODE_CFG) {
89 		pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
90 						PCI_EXP_LNKCTL_ASPMC,
91 						enable ? pcr->aspm_en : 0);
92 	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
93 		if (pcr->aspm_en & 0x02)
94 			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
95 				FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
96 		else
97 			rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
98 				FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
99 	}
100 
101 	if (!enable && (pcr->aspm_en & 0x02))
102 		mdelay(10);
103 
104 	pcr->aspm_enabled = enable;
105 }
106 
107 static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
108 {
109 	if (pcr->ops->set_aspm)
110 		pcr->ops->set_aspm(pcr, false);
111 	else
112 		rtsx_comm_set_aspm(pcr, false);
113 }
114 
115 int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
116 {
117 	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
118 
119 	return 0;
120 }
121 
122 static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
123 {
124 	if (pcr->ops->set_l1off_cfg_sub_d0)
125 		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
126 }
127 
128 static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
129 {
130 	struct rtsx_cr_option *option = &pcr->option;
131 
132 	rtsx_disable_aspm(pcr);
133 
134 	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
135 	msleep(1);
136 
137 	if (option->ltr_enabled)
138 		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
139 
140 	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
141 		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
142 }
143 
144 static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
145 {
146 	rtsx_comm_pm_full_on(pcr);
147 }
148 
149 void rtsx_pci_start_run(struct rtsx_pcr *pcr)
150 {
151 	/* If pci device removed, don't queue idle work any more */
152 	if (pcr->remove_pci)
153 		return;
154 
155 	if (pcr->state != PDEV_STAT_RUN) {
156 		pcr->state = PDEV_STAT_RUN;
157 		if (pcr->ops->enable_auto_blink)
158 			pcr->ops->enable_auto_blink(pcr);
159 		rtsx_pm_full_on(pcr);
160 	}
161 }
162 EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
163 
164 int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
165 {
166 	int i;
167 	u32 val = HAIMR_WRITE_START;
168 
169 	val |= (u32)(addr & 0x3FFF) << 16;
170 	val |= (u32)mask << 8;
171 	val |= (u32)data;
172 
173 	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
174 
175 	for (i = 0; i < MAX_RW_REG_CNT; i++) {
176 		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
177 		if ((val & HAIMR_TRANS_END) == 0) {
178 			if (data != (u8)val)
179 				return -EIO;
180 			return 0;
181 		}
182 	}
183 
184 	return -ETIMEDOUT;
185 }
186 EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
187 
188 int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
189 {
190 	u32 val = HAIMR_READ_START;
191 	int i;
192 
193 	val |= (u32)(addr & 0x3FFF) << 16;
194 	rtsx_pci_writel(pcr, RTSX_HAIMR, val);
195 
196 	for (i = 0; i < MAX_RW_REG_CNT; i++) {
197 		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
198 		if ((val & HAIMR_TRANS_END) == 0)
199 			break;
200 	}
201 
202 	if (i >= MAX_RW_REG_CNT)
203 		return -ETIMEDOUT;
204 
205 	if (data)
206 		*data = (u8)(val & 0xFF);
207 
208 	return 0;
209 }
210 EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
211 
212 int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
213 {
214 	int err, i, finished = 0;
215 	u8 tmp;
216 
217 	rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
218 	rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
219 	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
220 	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
221 
222 	for (i = 0; i < 100000; i++) {
223 		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
224 		if (err < 0)
225 			return err;
226 
227 		if (!(tmp & 0x80)) {
228 			finished = 1;
229 			break;
230 		}
231 	}
232 
233 	if (!finished)
234 		return -ETIMEDOUT;
235 
236 	return 0;
237 }
238 
239 int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
240 {
241 	if (pcr->ops->write_phy)
242 		return pcr->ops->write_phy(pcr, addr, val);
243 
244 	return __rtsx_pci_write_phy_register(pcr, addr, val);
245 }
246 EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
247 
248 int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
249 {
250 	int err, i, finished = 0;
251 	u16 data;
252 	u8 tmp, val1, val2;
253 
254 	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
255 	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
256 
257 	for (i = 0; i < 100000; i++) {
258 		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
259 		if (err < 0)
260 			return err;
261 
262 		if (!(tmp & 0x80)) {
263 			finished = 1;
264 			break;
265 		}
266 	}
267 
268 	if (!finished)
269 		return -ETIMEDOUT;
270 
271 	rtsx_pci_read_register(pcr, PHYDATA0, &val1);
272 	rtsx_pci_read_register(pcr, PHYDATA1, &val2);
273 	data = val1 | (val2 << 8);
274 
275 	if (val)
276 		*val = data;
277 
278 	return 0;
279 }
280 
281 int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
282 {
283 	if (pcr->ops->read_phy)
284 		return pcr->ops->read_phy(pcr, addr, val);
285 
286 	return __rtsx_pci_read_phy_register(pcr, addr, val);
287 }
288 EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
289 
290 void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
291 {
292 	if (pcr->ops->stop_cmd)
293 		return pcr->ops->stop_cmd(pcr);
294 
295 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
296 	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
297 
298 	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
299 	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
300 }
301 EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
302 
303 void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
304 		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
305 {
306 	unsigned long flags;
307 	u32 val = 0;
308 	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
309 
310 	val |= (u32)(cmd_type & 0x03) << 30;
311 	val |= (u32)(reg_addr & 0x3FFF) << 16;
312 	val |= (u32)mask << 8;
313 	val |= (u32)data;
314 
315 	spin_lock_irqsave(&pcr->lock, flags);
316 	ptr += pcr->ci;
317 	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
318 		put_unaligned_le32(val, ptr);
319 		ptr++;
320 		pcr->ci++;
321 	}
322 	spin_unlock_irqrestore(&pcr->lock, flags);
323 }
324 EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
325 
326 void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
327 {
328 	u32 val = 1 << 31;
329 
330 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
331 
332 	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
333 	/* Hardware Auto Response */
334 	val |= 0x40000000;
335 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
336 }
337 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
338 
339 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
340 {
341 	struct completion trans_done;
342 	u32 val = 1 << 31;
343 	long timeleft;
344 	unsigned long flags;
345 	int err = 0;
346 
347 	spin_lock_irqsave(&pcr->lock, flags);
348 
349 	/* set up data structures for the wakeup system */
350 	pcr->done = &trans_done;
351 	pcr->trans_result = TRANS_NOT_READY;
352 	init_completion(&trans_done);
353 
354 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
355 
356 	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
357 	/* Hardware Auto Response */
358 	val |= 0x40000000;
359 	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
360 
361 	spin_unlock_irqrestore(&pcr->lock, flags);
362 
363 	/* Wait for TRANS_OK_INT */
364 	timeleft = wait_for_completion_interruptible_timeout(
365 			&trans_done, msecs_to_jiffies(timeout));
366 	if (timeleft <= 0) {
367 		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
368 		err = -ETIMEDOUT;
369 		goto finish_send_cmd;
370 	}
371 
372 	spin_lock_irqsave(&pcr->lock, flags);
373 	if (pcr->trans_result == TRANS_RESULT_FAIL)
374 		err = -EINVAL;
375 	else if (pcr->trans_result == TRANS_RESULT_OK)
376 		err = 0;
377 	else if (pcr->trans_result == TRANS_NO_DEVICE)
378 		err = -ENODEV;
379 	spin_unlock_irqrestore(&pcr->lock, flags);
380 
381 finish_send_cmd:
382 	spin_lock_irqsave(&pcr->lock, flags);
383 	pcr->done = NULL;
384 	spin_unlock_irqrestore(&pcr->lock, flags);
385 
386 	if ((err < 0) && (err != -ENODEV))
387 		rtsx_pci_stop_cmd(pcr);
388 
389 	if (pcr->finish_me)
390 		complete(pcr->finish_me);
391 
392 	return err;
393 }
394 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
395 
396 static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
397 		dma_addr_t addr, unsigned int len, int end)
398 {
399 	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
400 	u64 val;
401 	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
402 
403 	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
404 
405 	if (end)
406 		option |= RTSX_SG_END;
407 
408 	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
409 		if (len > 0xFFFF)
410 			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
411 				| (((u64)len >> 16) << 6) | option;
412 		else
413 			val = ((u64)addr << 32) | ((u64)len << 16) | option;
414 	} else {
415 		val = ((u64)addr << 32) | ((u64)len << 12) | option;
416 	}
417 	put_unaligned_le64(val, ptr);
418 	pcr->sgi++;
419 }
420 
421 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
422 		int num_sg, bool read, int timeout)
423 {
424 	int err = 0, count;
425 
426 	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
427 	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
428 	if (count < 1)
429 		return -EINVAL;
430 	pcr_dbg(pcr, "DMA mapping count: %d\n", count);
431 
432 	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
433 
434 	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
435 
436 	return err;
437 }
438 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
439 
440 int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
441 		int num_sg, bool read)
442 {
443 	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
444 
445 	if (pcr->remove_pci)
446 		return -EINVAL;
447 
448 	if ((sglist == NULL) || (num_sg <= 0))
449 		return -EINVAL;
450 
451 	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
452 }
453 EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
454 
455 void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
456 		int num_sg, bool read)
457 {
458 	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
459 
460 	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
461 }
462 EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
463 
464 int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
465 		int count, bool read, int timeout)
466 {
467 	struct completion trans_done;
468 	struct scatterlist *sg;
469 	dma_addr_t addr;
470 	long timeleft;
471 	unsigned long flags;
472 	unsigned int len;
473 	int i, err = 0;
474 	u32 val;
475 	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
476 
477 	if (pcr->remove_pci)
478 		return -ENODEV;
479 
480 	if ((sglist == NULL) || (count < 1))
481 		return -EINVAL;
482 
483 	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
484 	pcr->sgi = 0;
485 	for_each_sg(sglist, sg, count, i) {
486 		addr = sg_dma_address(sg);
487 		len = sg_dma_len(sg);
488 		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
489 	}
490 
491 	spin_lock_irqsave(&pcr->lock, flags);
492 
493 	pcr->done = &trans_done;
494 	pcr->trans_result = TRANS_NOT_READY;
495 	init_completion(&trans_done);
496 	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
497 	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
498 
499 	spin_unlock_irqrestore(&pcr->lock, flags);
500 
501 	timeleft = wait_for_completion_interruptible_timeout(
502 			&trans_done, msecs_to_jiffies(timeout));
503 	if (timeleft <= 0) {
504 		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
505 		err = -ETIMEDOUT;
506 		goto out;
507 	}
508 
509 	spin_lock_irqsave(&pcr->lock, flags);
510 	if (pcr->trans_result == TRANS_RESULT_FAIL) {
511 		err = -EILSEQ;
512 		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
513 			pcr->dma_error_count++;
514 	}
515 
516 	else if (pcr->trans_result == TRANS_NO_DEVICE)
517 		err = -ENODEV;
518 	spin_unlock_irqrestore(&pcr->lock, flags);
519 
520 out:
521 	spin_lock_irqsave(&pcr->lock, flags);
522 	pcr->done = NULL;
523 	spin_unlock_irqrestore(&pcr->lock, flags);
524 
525 	if ((err < 0) && (err != -ENODEV))
526 		rtsx_pci_stop_cmd(pcr);
527 
528 	if (pcr->finish_me)
529 		complete(pcr->finish_me);
530 
531 	return err;
532 }
533 EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
534 
535 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
536 {
537 	int err;
538 	int i, j;
539 	u16 reg;
540 	u8 *ptr;
541 
542 	if (buf_len > 512)
543 		buf_len = 512;
544 
545 	ptr = buf;
546 	reg = PPBUF_BASE2;
547 	for (i = 0; i < buf_len / 256; i++) {
548 		rtsx_pci_init_cmd(pcr);
549 
550 		for (j = 0; j < 256; j++)
551 			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
552 
553 		err = rtsx_pci_send_cmd(pcr, 250);
554 		if (err < 0)
555 			return err;
556 
557 		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
558 		ptr += 256;
559 	}
560 
561 	if (buf_len % 256) {
562 		rtsx_pci_init_cmd(pcr);
563 
564 		for (j = 0; j < buf_len % 256; j++)
565 			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
566 
567 		err = rtsx_pci_send_cmd(pcr, 250);
568 		if (err < 0)
569 			return err;
570 	}
571 
572 	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
573 
574 	return 0;
575 }
576 EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
577 
578 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
579 {
580 	int err;
581 	int i, j;
582 	u16 reg;
583 	u8 *ptr;
584 
585 	if (buf_len > 512)
586 		buf_len = 512;
587 
588 	ptr = buf;
589 	reg = PPBUF_BASE2;
590 	for (i = 0; i < buf_len / 256; i++) {
591 		rtsx_pci_init_cmd(pcr);
592 
593 		for (j = 0; j < 256; j++) {
594 			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
595 					reg++, 0xFF, *ptr);
596 			ptr++;
597 		}
598 
599 		err = rtsx_pci_send_cmd(pcr, 250);
600 		if (err < 0)
601 			return err;
602 	}
603 
604 	if (buf_len % 256) {
605 		rtsx_pci_init_cmd(pcr);
606 
607 		for (j = 0; j < buf_len % 256; j++) {
608 			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
609 					reg++, 0xFF, *ptr);
610 			ptr++;
611 		}
612 
613 		err = rtsx_pci_send_cmd(pcr, 250);
614 		if (err < 0)
615 			return err;
616 	}
617 
618 	return 0;
619 }
620 EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
621 
622 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
623 {
624 	rtsx_pci_init_cmd(pcr);
625 
626 	while (*tbl & 0xFFFF0000) {
627 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
628 				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
629 		tbl++;
630 	}
631 
632 	return rtsx_pci_send_cmd(pcr, 100);
633 }
634 
635 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
636 {
637 	const u32 *tbl;
638 
639 	if (card == RTSX_SD_CARD)
640 		tbl = pcr->sd_pull_ctl_enable_tbl;
641 	else if (card == RTSX_MS_CARD)
642 		tbl = pcr->ms_pull_ctl_enable_tbl;
643 	else
644 		return -EINVAL;
645 
646 	return rtsx_pci_set_pull_ctl(pcr, tbl);
647 }
648 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
649 
650 int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
651 {
652 	const u32 *tbl;
653 
654 	if (card == RTSX_SD_CARD)
655 		tbl = pcr->sd_pull_ctl_disable_tbl;
656 	else if (card == RTSX_MS_CARD)
657 		tbl = pcr->ms_pull_ctl_disable_tbl;
658 	else
659 		return -EINVAL;
660 
661 	return rtsx_pci_set_pull_ctl(pcr, tbl);
662 }
663 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
664 
665 static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
666 {
667 	struct rtsx_hw_param *hw_param = &pcr->hw_param;
668 
669 	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
670 		| hw_param->interrupt_en;
671 
672 	if (pcr->num_slots > 1)
673 		pcr->bier |= MS_INT_EN;
674 
675 	/* Enable Bus Interrupt */
676 	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
677 
678 	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
679 }
680 
681 static inline u8 double_ssc_depth(u8 depth)
682 {
683 	return ((depth > 1) ? (depth - 1) : depth);
684 }
685 
686 static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
687 {
688 	if (div > CLK_DIV_1) {
689 		if (ssc_depth > (div - 1))
690 			ssc_depth -= (div - 1);
691 		else
692 			ssc_depth = SSC_DEPTH_4M;
693 	}
694 
695 	return ssc_depth;
696 }
697 
698 int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
699 		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
700 {
701 	int err, clk;
702 	u8 n, clk_divider, mcu_cnt, div;
703 	static const u8 depth[] = {
704 		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
705 		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
706 		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
707 		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
708 		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
709 	};
710 
711 	if (PCI_PID(pcr) == PID_5261)
712 		return rts5261_pci_switch_clock(pcr, card_clock,
713 				ssc_depth, initial_mode, double_clk, vpclk);
714 	if (PCI_PID(pcr) == PID_5228)
715 		return rts5228_pci_switch_clock(pcr, card_clock,
716 				ssc_depth, initial_mode, double_clk, vpclk);
717 
718 	if (initial_mode) {
719 		/* Use a clock of roughly 250 kHz during the initial stage */
720 		clk_divider = SD_CLK_DIVIDE_128;
721 		card_clock = 30000000;
722 	} else {
723 		clk_divider = SD_CLK_DIVIDE_0;
724 	}
725 	err = rtsx_pci_write_register(pcr, SD_CFG1,
726 			SD_CLK_DIVIDE_MASK, clk_divider);
727 	if (err < 0)
728 		return err;
729 
730 	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
731 	if (card_clock == UHS_SDR104_MAX_DTR &&
732 	    pcr->dma_error_count &&
733 	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
734 		card_clock = UHS_SDR104_MAX_DTR -
735 			(pcr->dma_error_count * 20000000);
736 
737 	card_clock /= 1000000;
738 	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
739 
740 	clk = card_clock;
741 	if (!initial_mode && double_clk)
742 		clk = card_clock * 2;
743 	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
744 		clk, pcr->cur_clock);
745 
746 	if (clk == pcr->cur_clock)
747 		return 0;
748 
749 	if (pcr->ops->conv_clk_and_div_n)
750 		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
751 	else
752 		n = (u8)(clk - 2);
753 	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
754 		return -EINVAL;
755 
756 	mcu_cnt = (u8)(125/clk + 3);
757 	if (mcu_cnt > 15)
758 		mcu_cnt = 15;
759 
760 	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
761 	div = CLK_DIV_1;
762 	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
763 		if (pcr->ops->conv_clk_and_div_n) {
764 			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
765 					DIV_N_TO_CLK) * 2;
766 			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
767 					CLK_TO_DIV_N);
768 		} else {
769 			n = (n + 2) * 2 - 2;
770 		}
771 		div++;
772 	}
773 	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
774 
775 	ssc_depth = depth[ssc_depth];
776 	if (double_clk)
777 		ssc_depth = double_ssc_depth(ssc_depth);
778 
779 	ssc_depth = revise_ssc_depth(ssc_depth, div);
780 	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
781 
782 	rtsx_pci_init_cmd(pcr);
783 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
784 			CLK_LOW_FREQ, CLK_LOW_FREQ);
785 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
786 			0xFF, (div << 4) | mcu_cnt);
787 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
788 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
789 			SSC_DEPTH_MASK, ssc_depth);
790 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
791 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
792 	if (vpclk) {
793 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
794 				PHASE_NOT_RESET, 0);
795 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
796 				PHASE_NOT_RESET, PHASE_NOT_RESET);
797 	}
798 
799 	err = rtsx_pci_send_cmd(pcr, 2000);
800 	if (err < 0)
801 		return err;
802 
803 	/* Wait SSC clock stable */
804 	udelay(SSC_CLOCK_STABLE_WAIT);
805 	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
806 	if (err < 0)
807 		return err;
808 
809 	pcr->cur_clock = clk;
810 	return 0;
811 }
812 EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
813 
814 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
815 {
816 	if (pcr->ops->card_power_on)
817 		return pcr->ops->card_power_on(pcr, card);
818 
819 	return 0;
820 }
821 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
822 
823 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
824 {
825 	if (pcr->ops->card_power_off)
826 		return pcr->ops->card_power_off(pcr, card);
827 
828 	return 0;
829 }
830 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
831 
832 int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
833 {
834 	static const unsigned int cd_mask[] = {
835 		[RTSX_SD_CARD] = SD_EXIST,
836 		[RTSX_MS_CARD] = MS_EXIST
837 	};
838 
839 	if (!(pcr->flags & PCR_MS_PMOS)) {
840 		/* When using single PMOS, accessing card is not permitted
841 		 * if the existing card is not the designated one.
842 		 */
843 		if (pcr->card_exist & (~cd_mask[card]))
844 			return -EIO;
845 	}
846 
847 	return 0;
848 }
849 EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
850 
851 int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
852 {
853 	if (pcr->ops->switch_output_voltage)
854 		return pcr->ops->switch_output_voltage(pcr, voltage);
855 
856 	return 0;
857 }
858 EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
859 
860 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
861 {
862 	unsigned int val;
863 
864 	val = rtsx_pci_readl(pcr, RTSX_BIPR);
865 	if (pcr->ops->cd_deglitch)
866 		val = pcr->ops->cd_deglitch(pcr);
867 
868 	return val;
869 }
870 EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
871 
872 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
873 {
874 	struct completion finish;
875 
876 	pcr->finish_me = &finish;
877 	init_completion(&finish);
878 
879 	if (pcr->done)
880 		complete(pcr->done);
881 
882 	if (!pcr->remove_pci)
883 		rtsx_pci_stop_cmd(pcr);
884 
885 	wait_for_completion_interruptible_timeout(&finish,
886 			msecs_to_jiffies(2));
887 	pcr->finish_me = NULL;
888 }
889 EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
890 
891 static void rtsx_pci_card_detect(struct work_struct *work)
892 {
893 	struct delayed_work *dwork;
894 	struct rtsx_pcr *pcr;
895 	unsigned long flags;
896 	unsigned int card_detect = 0, card_inserted, card_removed;
897 	u32 irq_status;
898 
899 	dwork = to_delayed_work(work);
900 	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
901 
902 	pcr_dbg(pcr, "--> %s\n", __func__);
903 
904 	mutex_lock(&pcr->pcr_mutex);
905 	spin_lock_irqsave(&pcr->lock, flags);
906 
907 	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
908 	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
909 
910 	irq_status &= CARD_EXIST;
911 	card_inserted = pcr->card_inserted & irq_status;
912 	card_removed = pcr->card_removed;
913 	pcr->card_inserted = 0;
914 	pcr->card_removed = 0;
915 
916 	spin_unlock_irqrestore(&pcr->lock, flags);
917 
918 	if (card_inserted || card_removed) {
919 		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
920 			card_inserted, card_removed);
921 
922 		if (pcr->ops->cd_deglitch)
923 			card_inserted = pcr->ops->cd_deglitch(pcr);
924 
925 		card_detect = card_inserted | card_removed;
926 
927 		pcr->card_exist |= card_inserted;
928 		pcr->card_exist &= ~card_removed;
929 	}
930 
931 	mutex_unlock(&pcr->pcr_mutex);
932 
933 	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
934 		pcr->slots[RTSX_SD_CARD].card_event(
935 				pcr->slots[RTSX_SD_CARD].p_dev);
936 	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
937 		pcr->slots[RTSX_MS_CARD].card_event(
938 				pcr->slots[RTSX_MS_CARD].p_dev);
939 }
940 
941 static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
942 {
943 	if (pcr->ops->process_ocp) {
944 		pcr->ops->process_ocp(pcr);
945 	} else {
946 		if (!pcr->option.ocp_en)
947 			return;
948 		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
949 		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
950 			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
951 			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
952 			rtsx_pci_clear_ocpstat(pcr);
953 			pcr->ocp_stat = 0;
954 		}
955 	}
956 }
957 
958 static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
959 {
960 	if (pcr->option.ocp_en)
961 		rtsx_pci_process_ocp(pcr);
962 
963 	return 0;
964 }
965 
966 static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
967 {
968 	struct rtsx_pcr *pcr = dev_id;
969 	u32 int_reg;
970 
971 	if (!pcr)
972 		return IRQ_NONE;
973 
974 	spin_lock(&pcr->lock);
975 
976 	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
977 	/* Clear interrupt flag */
978 	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
979 	if ((int_reg & pcr->bier) == 0) {
980 		spin_unlock(&pcr->lock);
981 		return IRQ_NONE;
982 	}
983 	if (int_reg == 0xFFFFFFFF) {
984 		spin_unlock(&pcr->lock);
985 		return IRQ_HANDLED;
986 	}
987 
988 	int_reg &= (pcr->bier | 0x7FFFFF);
989 
990 	if (int_reg & SD_OC_INT)
991 		rtsx_pci_process_ocp_interrupt(pcr);
992 
993 	if (int_reg & SD_INT) {
994 		if (int_reg & SD_EXIST) {
995 			pcr->card_inserted |= SD_EXIST;
996 		} else {
997 			pcr->card_removed |= SD_EXIST;
998 			pcr->card_inserted &= ~SD_EXIST;
999 			if (PCI_PID(pcr) == PID_5261) {
1000 				rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
1001 					RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
1002 				pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
1003 			}
1004 		}
1005 		pcr->dma_error_count = 0;
1006 	}
1007 
1008 	if (int_reg & MS_INT) {
1009 		if (int_reg & MS_EXIST) {
1010 			pcr->card_inserted |= MS_EXIST;
1011 		} else {
1012 			pcr->card_removed |= MS_EXIST;
1013 			pcr->card_inserted &= ~MS_EXIST;
1014 		}
1015 	}
1016 
1017 	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1018 		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1019 			pcr->trans_result = TRANS_RESULT_FAIL;
1020 			if (pcr->done)
1021 				complete(pcr->done);
1022 		} else if (int_reg & TRANS_OK_INT) {
1023 			pcr->trans_result = TRANS_RESULT_OK;
1024 			if (pcr->done)
1025 				complete(pcr->done);
1026 		}
1027 	}
1028 
1029 	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
1030 		schedule_delayed_work(&pcr->carddet_work,
1031 				msecs_to_jiffies(200));
1032 
1033 	spin_unlock(&pcr->lock);
1034 	return IRQ_HANDLED;
1035 }
1036 
1037 static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1038 {
1039 	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1040 			__func__, pcr->msi_en, pcr->pci->irq);
1041 
1042 	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1043 			pcr->msi_en ? 0 : IRQF_SHARED,
1044 			DRV_NAME_RTSX_PCI, pcr)) {
1045 		dev_err(&(pcr->pci->dev),
1046 			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1047 			pcr->pci->irq);
1048 		return -1;
1049 	}
1050 
1051 	pcr->irq = pcr->pci->irq;
1052 	pci_intx(pcr->pci, !pcr->msi_en);
1053 
1054 	return 0;
1055 }
1056 
1057 static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
1058 {
1059 	/* Set relink_time to 0 */
1060 	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
1061 	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
1062 	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
1063 			RELINK_TIME_MASK, 0);
1064 
1065 	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
1066 			D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
1067 
1068 	rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
1069 }
1070 
1071 static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
1072 {
1073 	if (pcr->ops->turn_off_led)
1074 		pcr->ops->turn_off_led(pcr);
1075 
1076 	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1077 	pcr->bier = 0;
1078 
1079 	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1080 	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1081 
1082 	if (pcr->ops->force_power_down)
1083 		pcr->ops->force_power_down(pcr, pm_state, runtime);
1084 	else
1085 		rtsx_base_force_power_down(pcr);
1086 }
1087 
1088 void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1089 {
1090 	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1091 
1092 	if (pcr->ops->enable_ocp) {
1093 		pcr->ops->enable_ocp(pcr);
1094 	} else {
1095 		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1096 		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1097 	}
1098 
1099 }
1100 
1101 void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1102 {
1103 	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1104 
1105 	if (pcr->ops->disable_ocp) {
1106 		pcr->ops->disable_ocp(pcr);
1107 	} else {
1108 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1109 		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1110 				OC_POWER_DOWN);
1111 	}
1112 }
1113 
1114 void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1115 {
1116 	if (pcr->ops->init_ocp) {
1117 		pcr->ops->init_ocp(pcr);
1118 	} else {
1119 		struct rtsx_cr_option *option = &(pcr->option);
1120 
1121 		if (option->ocp_en) {
1122 			u8 val = option->sd_800mA_ocp_thd;
1123 
1124 			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1125 			rtsx_pci_write_register(pcr, REG_OCPPARA1,
1126 				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1127 			rtsx_pci_write_register(pcr, REG_OCPPARA2,
1128 				SD_OCP_THD_MASK, val);
1129 			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1130 				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1131 			rtsx_pci_enable_ocp(pcr);
1132 		}
1133 	}
1134 }
1135 
1136 int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1137 {
1138 	if (pcr->ops->get_ocpstat)
1139 		return pcr->ops->get_ocpstat(pcr, val);
1140 	else
1141 		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1142 }
1143 
1144 void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1145 {
1146 	if (pcr->ops->clear_ocpstat) {
1147 		pcr->ops->clear_ocpstat(pcr);
1148 	} else {
1149 		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1150 		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1151 
1152 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1153 		udelay(100);
1154 		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1155 	}
1156 }
1157 
1158 void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
1159 {
1160 	u16 val;
1161 
1162 	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1163 		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1164 		val |= 1<<9;
1165 		rtsx_pci_write_phy_register(pcr, 0x01, val);
1166 	}
1167 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
1168 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
1169 	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
1170 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
1171 
1172 }
1173 
1174 void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
1175 {
1176 	u16 val;
1177 
1178 	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
1179 		rtsx_pci_read_phy_register(pcr, 0x01, &val);
1180 		val &= ~(1<<9);
1181 		rtsx_pci_write_phy_register(pcr, 0x01, val);
1182 	}
1183 	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
1184 	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
1185 
1186 }
1187 
1188 int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1189 {
1190 	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1191 		MS_CLK_EN | SD40_CLK_EN, 0);
1192 	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1193 	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1194 
1195 	msleep(50);
1196 
1197 	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1198 
1199 	return 0;
1200 }
1201 
1202 int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1203 {
1204 	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1205 		MS_CLK_EN | SD40_CLK_EN, 0);
1206 
1207 	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1208 
1209 	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1210 	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1211 
1212 	return 0;
1213 }
1214 
1215 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1216 {
1217 	struct pci_dev *pdev = pcr->pci;
1218 	int err;
1219 
1220 	if (PCI_PID(pcr) == PID_5228)
1221 		rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
1222 				RTS5228_LDO1_SR_0_5);
1223 
1224 	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1225 
1226 	rtsx_pci_enable_bus_int(pcr);
1227 
1228 	/* Power on SSC */
1229 	if (PCI_PID(pcr) == PID_5261) {
1230 		/* Gating real mcu clock */
1231 		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
1232 			RTS5261_MCU_CLOCK_GATING, 0);
1233 		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
1234 			SSC_POWER_DOWN, 0);
1235 	} else {
1236 		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1237 	}
1238 	if (err < 0)
1239 		return err;
1240 
1241 	/* Wait SSC power stable */
1242 	udelay(200);
1243 
1244 	rtsx_disable_aspm(pcr);
1245 	if (pcr->ops->optimize_phy) {
1246 		err = pcr->ops->optimize_phy(pcr);
1247 		if (err < 0)
1248 			return err;
1249 	}
1250 
1251 	rtsx_pci_init_cmd(pcr);
1252 
1253 	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
1254 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1255 
1256 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1257 	/* Disable card clock */
1258 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1259 	/* Reset delink mode */
1260 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1261 	/* Card driving select */
1262 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1263 			0xFF, pcr->card_drive_sel);
1264 	/* Enable SSC Clock */
1265 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1266 			0xFF, SSC_8X_EN | SSC_SEL_4M);
1267 	if (PCI_PID(pcr) == PID_5261)
1268 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1269 			RTS5261_SSC_DEPTH_2M);
1270 	else if (PCI_PID(pcr) == PID_5228)
1271 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
1272 			RTS5228_SSC_DEPTH_2M);
1273 	else
1274 		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1275 
1276 	/* Disable cd_pwr_save */
1277 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1278 	/* Clear Link Ready Interrupt */
1279 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1280 			LINK_RDY_INT, LINK_RDY_INT);
1281 	/* Enlarge the estimation window of PERST# glitch
1282 	 * to reduce the chance of invalid card interrupt
1283 	 */
1284 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1285 	/* Update RC oscillator to 400k
1286 	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1287 	 *                1: 2M  0: 400k
1288 	 */
1289 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1290 	/* Set interrupt write clear
1291 	 * bit 1: U_elbi_if_rd_clr_en
1292 	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1293 	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1294 	 */
1295 	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1296 
1297 	err = rtsx_pci_send_cmd(pcr, 100);
1298 	if (err < 0)
1299 		return err;
1300 
1301 	switch (PCI_PID(pcr)) {
1302 	case PID_5250:
1303 	case PID_524A:
1304 	case PID_525A:
1305 	case PID_5260:
1306 	case PID_5261:
1307 	case PID_5228:
1308 		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1309 		break;
1310 	default:
1311 		break;
1312 	}
1313 
1314 	/* Init OCP (over-current protection) */
1315 	rtsx_pci_init_ocp(pcr);
1316 
1317 	/* Enable clk_request_n to enable clock power management */
1318 	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
1319 					0, PCI_EXP_LNKCTL_CLKREQ_EN);
1320 	/* Enter L1 when host tx idle */
1321 	pci_write_config_byte(pdev, 0x70F, 0x5B);
1322 
1323 	if (pcr->ops->extra_init_hw) {
1324 		err = pcr->ops->extra_init_hw(pcr);
1325 		if (err < 0)
1326 			return err;
1327 	}
1328 
1329 	if (pcr->aspm_mode == ASPM_MODE_REG)
1330 		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
1331 
1332 	/* No CD interrupt if probing driver with card inserted.
1333 	 * So we need to initialize pcr->card_exist here.
1334 	 */
1335 	if (pcr->ops->cd_deglitch)
1336 		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1337 	else
1338 		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1339 
1340 	return 0;
1341 }
1342 
1343 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1344 {
1345 	struct rtsx_cr_option *option = &(pcr->option);
1346 	int err, l1ss;
1347 	u32 lval;
1348 	u16 cfg_val;
1349 	u8 val;
1350 
1351 	spin_lock_init(&pcr->lock);
1352 	mutex_init(&pcr->pcr_mutex);
1353 
1354 	switch (PCI_PID(pcr)) {
1355 	default:
1356 	case 0x5209:
1357 		rts5209_init_params(pcr);
1358 		break;
1359 
1360 	case 0x5229:
1361 		rts5229_init_params(pcr);
1362 		break;
1363 
1364 	case 0x5289:
1365 		rtl8411_init_params(pcr);
1366 		break;
1367 
1368 	case 0x5227:
1369 		rts5227_init_params(pcr);
1370 		break;
1371 
1372 	case 0x522A:
1373 		rts522a_init_params(pcr);
1374 		break;
1375 
1376 	case 0x5249:
1377 		rts5249_init_params(pcr);
1378 		break;
1379 
1380 	case 0x524A:
1381 		rts524a_init_params(pcr);
1382 		break;
1383 
1384 	case 0x525A:
1385 		rts525a_init_params(pcr);
1386 		break;
1387 
1388 	case 0x5287:
1389 		rtl8411b_init_params(pcr);
1390 		break;
1391 
1392 	case 0x5286:
1393 		rtl8402_init_params(pcr);
1394 		break;
1395 
1396 	case 0x5260:
1397 		rts5260_init_params(pcr);
1398 		break;
1399 
1400 	case 0x5261:
1401 		rts5261_init_params(pcr);
1402 		break;
1403 
1404 	case 0x5228:
1405 		rts5228_init_params(pcr);
1406 		break;
1407 	}
1408 
1409 	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1410 			PCI_PID(pcr), pcr->ic_version);
1411 
1412 	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1413 			GFP_KERNEL);
1414 	if (!pcr->slots)
1415 		return -ENOMEM;
1416 
1417 	if (pcr->aspm_mode == ASPM_MODE_CFG) {
1418 		pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
1419 		if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
1420 			pcr->aspm_enabled = true;
1421 		else
1422 			pcr->aspm_enabled = false;
1423 
1424 	} else if (pcr->aspm_mode == ASPM_MODE_REG) {
1425 		rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
1426 		if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
1427 			pcr->aspm_enabled = false;
1428 		else
1429 			pcr->aspm_enabled = true;
1430 	}
1431 
1432 	l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
1433 	if (l1ss) {
1434 		pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
1435 
1436 		if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
1437 			rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
1438 		else
1439 			rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
1440 
1441 		if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
1442 			rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
1443 		else
1444 			rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
1445 
1446 		if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
1447 			rtsx_set_dev_flag(pcr, PM_L1_1_EN);
1448 		else
1449 			rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
1450 
1451 		if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
1452 			rtsx_set_dev_flag(pcr, PM_L1_2_EN);
1453 		else
1454 			rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
1455 
1456 		pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
1457 		if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
1458 			option->ltr_enabled = true;
1459 			option->ltr_active = true;
1460 		} else {
1461 			option->ltr_enabled = false;
1462 		}
1463 
1464 		if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
1465 				| PM_L1_1_EN | PM_L1_2_EN))
1466 			option->force_clkreq_0 = false;
1467 		else
1468 			option->force_clkreq_0 = true;
1469 	} else {
1470 		option->ltr_enabled = false;
1471 		option->force_clkreq_0 = true;
1472 	}
1473 
1474 	if (pcr->ops->fetch_vendor_settings)
1475 		pcr->ops->fetch_vendor_settings(pcr);
1476 
1477 	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1478 	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1479 			pcr->sd30_drive_sel_1v8);
1480 	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1481 			pcr->sd30_drive_sel_3v3);
1482 	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1483 			pcr->card_drive_sel);
1484 	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1485 
1486 	pcr->state = PDEV_STAT_IDLE;
1487 	err = rtsx_pci_init_hw(pcr);
1488 	if (err < 0) {
1489 		kfree(pcr->slots);
1490 		return err;
1491 	}
1492 
1493 	return 0;
1494 }
1495 
1496 static int rtsx_pci_probe(struct pci_dev *pcidev,
1497 			  const struct pci_device_id *id)
1498 {
1499 	struct rtsx_pcr *pcr;
1500 	struct pcr_handle *handle;
1501 	u32 base, len;
1502 	int ret, i, bar = 0;
1503 
1504 	dev_dbg(&(pcidev->dev),
1505 		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1506 		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1507 		(int)pcidev->revision);
1508 
1509 	ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
1510 	if (ret < 0)
1511 		return ret;
1512 
1513 	ret = pci_enable_device(pcidev);
1514 	if (ret)
1515 		return ret;
1516 
1517 	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1518 	if (ret)
1519 		goto disable;
1520 
1521 	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1522 	if (!pcr) {
1523 		ret = -ENOMEM;
1524 		goto release_pci;
1525 	}
1526 
1527 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1528 	if (!handle) {
1529 		ret = -ENOMEM;
1530 		goto free_pcr;
1531 	}
1532 	handle->pcr = pcr;
1533 
1534 	idr_preload(GFP_KERNEL);
1535 	spin_lock(&rtsx_pci_lock);
1536 	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1537 	if (ret >= 0)
1538 		pcr->id = ret;
1539 	spin_unlock(&rtsx_pci_lock);
1540 	idr_preload_end();
1541 	if (ret < 0)
1542 		goto free_handle;
1543 
1544 	pcr->pci = pcidev;
1545 	dev_set_drvdata(&pcidev->dev, handle);
1546 
1547 	if (CHK_PCI_PID(pcr, 0x525A))
1548 		bar = 1;
1549 	len = pci_resource_len(pcidev, bar);
1550 	base = pci_resource_start(pcidev, bar);
1551 	pcr->remap_addr = ioremap(base, len);
1552 	if (!pcr->remap_addr) {
1553 		ret = -ENOMEM;
1554 		goto free_idr;
1555 	}
1556 
1557 	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1558 			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1559 			GFP_KERNEL);
1560 	if (pcr->rtsx_resv_buf == NULL) {
1561 		ret = -ENXIO;
1562 		goto unmap;
1563 	}
1564 	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1565 	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1566 	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1567 	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1568 	pcr->card_inserted = 0;
1569 	pcr->card_removed = 0;
1570 	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1571 
1572 	pcr->msi_en = msi_en;
1573 	if (pcr->msi_en) {
1574 		ret = pci_enable_msi(pcidev);
1575 		if (ret)
1576 			pcr->msi_en = false;
1577 	}
1578 
1579 	ret = rtsx_pci_acquire_irq(pcr);
1580 	if (ret < 0)
1581 		goto disable_msi;
1582 
1583 	pci_set_master(pcidev);
1584 	synchronize_irq(pcr->irq);
1585 
1586 	ret = rtsx_pci_init_chip(pcr);
1587 	if (ret < 0)
1588 		goto disable_irq;
1589 
1590 	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1591 		rtsx_pcr_cells[i].platform_data = handle;
1592 		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1593 	}
1594 
1595 
1596 	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1597 			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1598 	if (ret < 0)
1599 		goto free_slots;
1600 
1601 	pm_runtime_allow(&pcidev->dev);
1602 	pm_runtime_put(&pcidev->dev);
1603 
1604 	return 0;
1605 
1606 free_slots:
1607 	kfree(pcr->slots);
1608 disable_irq:
1609 	free_irq(pcr->irq, (void *)pcr);
1610 disable_msi:
1611 	if (pcr->msi_en)
1612 		pci_disable_msi(pcr->pci);
1613 	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1614 			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1615 unmap:
1616 	iounmap(pcr->remap_addr);
1617 free_idr:
1618 	spin_lock(&rtsx_pci_lock);
1619 	idr_remove(&rtsx_pci_idr, pcr->id);
1620 	spin_unlock(&rtsx_pci_lock);
1621 free_handle:
1622 	kfree(handle);
1623 free_pcr:
1624 	kfree(pcr);
1625 release_pci:
1626 	pci_release_regions(pcidev);
1627 disable:
1628 	pci_disable_device(pcidev);
1629 
1630 	return ret;
1631 }
1632 
1633 static void rtsx_pci_remove(struct pci_dev *pcidev)
1634 {
1635 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1636 	struct rtsx_pcr *pcr = handle->pcr;
1637 
1638 	pcr->remove_pci = true;
1639 
1640 	pm_runtime_get_sync(&pcidev->dev);
1641 	pm_runtime_forbid(&pcidev->dev);
1642 
1643 	/* Disable interrupts at the pcr level */
1644 	spin_lock_irq(&pcr->lock);
1645 	rtsx_pci_writel(pcr, RTSX_BIER, 0);
1646 	pcr->bier = 0;
1647 	spin_unlock_irq(&pcr->lock);
1648 
1649 	cancel_delayed_work_sync(&pcr->carddet_work);
1650 
1651 	mfd_remove_devices(&pcidev->dev);
1652 
1653 	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1654 			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1655 	free_irq(pcr->irq, (void *)pcr);
1656 	if (pcr->msi_en)
1657 		pci_disable_msi(pcr->pci);
1658 	iounmap(pcr->remap_addr);
1659 
1660 	pci_release_regions(pcidev);
1661 	pci_disable_device(pcidev);
1662 
1663 	spin_lock(&rtsx_pci_lock);
1664 	idr_remove(&rtsx_pci_idr, pcr->id);
1665 	spin_unlock(&rtsx_pci_lock);
1666 
1667 	kfree(pcr->slots);
1668 	kfree(pcr);
1669 	kfree(handle);
1670 
1671 	dev_dbg(&(pcidev->dev),
1672 		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1673 		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1674 }
1675 
1676 static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
1677 {
1678 	struct pci_dev *pcidev = to_pci_dev(dev_d);
1679 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1680 	struct rtsx_pcr *pcr = handle->pcr;
1681 
1682 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1683 
1684 	cancel_delayed_work_sync(&pcr->carddet_work);
1685 
1686 	mutex_lock(&pcr->pcr_mutex);
1687 
1688 	rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
1689 
1690 	mutex_unlock(&pcr->pcr_mutex);
1691 	return 0;
1692 }
1693 
1694 static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
1695 {
1696 	struct pci_dev *pcidev = to_pci_dev(dev_d);
1697 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1698 	struct rtsx_pcr *pcr = handle->pcr;
1699 	int ret = 0;
1700 
1701 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1702 
1703 	mutex_lock(&pcr->pcr_mutex);
1704 
1705 	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1706 	if (ret)
1707 		goto out;
1708 
1709 	ret = rtsx_pci_init_hw(pcr);
1710 	if (ret)
1711 		goto out;
1712 
1713 out:
1714 	mutex_unlock(&pcr->pcr_mutex);
1715 	return ret;
1716 }
1717 
1718 #ifdef CONFIG_PM
1719 
1720 static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1721 {
1722 	if (pcr->ops->set_aspm)
1723 		pcr->ops->set_aspm(pcr, true);
1724 	else
1725 		rtsx_comm_set_aspm(pcr, true);
1726 }
1727 
1728 static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1729 {
1730 	struct rtsx_cr_option *option = &pcr->option;
1731 
1732 	if (option->ltr_enabled) {
1733 		u32 latency = option->ltr_l1off_latency;
1734 
1735 		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1736 			mdelay(option->l1_snooze_delay);
1737 
1738 		rtsx_set_ltr_latency(pcr, latency);
1739 	}
1740 
1741 	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1742 		rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1743 
1744 	rtsx_enable_aspm(pcr);
1745 }
1746 
1747 static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1748 {
1749 	rtsx_comm_pm_power_saving(pcr);
1750 }
1751 
1752 static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1753 {
1754 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1755 	struct rtsx_pcr *pcr = handle->pcr;
1756 
1757 	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1758 
1759 	rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
1760 
1761 	pci_disable_device(pcidev);
1762 	free_irq(pcr->irq, (void *)pcr);
1763 	if (pcr->msi_en)
1764 		pci_disable_msi(pcr->pci);
1765 }
1766 
1767 static int rtsx_pci_runtime_idle(struct device *device)
1768 {
1769 	struct pci_dev *pcidev = to_pci_dev(device);
1770 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1771 	struct rtsx_pcr *pcr = handle->pcr;
1772 
1773 	dev_dbg(device, "--> %s\n", __func__);
1774 
1775 	mutex_lock(&pcr->pcr_mutex);
1776 
1777 	pcr->state = PDEV_STAT_IDLE;
1778 
1779 	if (pcr->ops->disable_auto_blink)
1780 		pcr->ops->disable_auto_blink(pcr);
1781 	if (pcr->ops->turn_off_led)
1782 		pcr->ops->turn_off_led(pcr);
1783 
1784 	rtsx_pm_power_saving(pcr);
1785 
1786 	mutex_unlock(&pcr->pcr_mutex);
1787 
1788 	if (pcr->rtd3_en)
1789 		pm_schedule_suspend(device, 10000);
1790 
1791 	return -EBUSY;
1792 }
1793 
1794 static int rtsx_pci_runtime_suspend(struct device *device)
1795 {
1796 	struct pci_dev *pcidev = to_pci_dev(device);
1797 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1798 	struct rtsx_pcr *pcr = handle->pcr;
1799 
1800 	dev_dbg(device, "--> %s\n", __func__);
1801 
1802 	cancel_delayed_work_sync(&pcr->carddet_work);
1803 
1804 	mutex_lock(&pcr->pcr_mutex);
1805 	rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
1806 
1807 	mutex_unlock(&pcr->pcr_mutex);
1808 
1809 	return 0;
1810 }
1811 
1812 static int rtsx_pci_runtime_resume(struct device *device)
1813 {
1814 	struct pci_dev *pcidev = to_pci_dev(device);
1815 	struct pcr_handle *handle = pci_get_drvdata(pcidev);
1816 	struct rtsx_pcr *pcr = handle->pcr;
1817 
1818 	dev_dbg(device, "--> %s\n", __func__);
1819 
1820 	mutex_lock(&pcr->pcr_mutex);
1821 
1822 	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1823 
1824 	rtsx_pci_init_hw(pcr);
1825 
1826 	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
1827 		pcr->slots[RTSX_SD_CARD].card_event(
1828 				pcr->slots[RTSX_SD_CARD].p_dev);
1829 	}
1830 
1831 	mutex_unlock(&pcr->pcr_mutex);
1832 	return 0;
1833 }
1834 
1835 #else /* CONFIG_PM */
1836 
1837 #define rtsx_pci_shutdown NULL
1838 #define rtsx_pci_runtime_suspend NULL
1839 #define rtsx_pci_runtime_resume NULL
1840 
1841 #endif /* CONFIG_PM */
1842 
1843 static const struct dev_pm_ops rtsx_pci_pm_ops = {
1844 	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
1845 	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
1846 };
1847 
1848 static struct pci_driver rtsx_pci_driver = {
1849 	.name = DRV_NAME_RTSX_PCI,
1850 	.id_table = rtsx_pci_ids,
1851 	.probe = rtsx_pci_probe,
1852 	.remove = rtsx_pci_remove,
1853 	.driver.pm = &rtsx_pci_pm_ops,
1854 	.shutdown = rtsx_pci_shutdown,
1855 };
1856 module_pci_driver(rtsx_pci_driver);
1857 
1858 MODULE_LICENSE("GPL");
1859 MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1860 MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
1861