1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*  linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
3  *
4  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5  *
6  * Thanks to the following companies for their support:
7  *
8  *     - JMicron (hardware and technical support)
9  */
10 
11 #include <linux/bitfield.h>
12 #include <linux/string.h>
13 #include <linux/delay.h>
14 #include <linux/highmem.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/scatterlist.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/gpio.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm_qos.h>
26 #include <linux/debugfs.h>
27 #include <linux/acpi.h>
28 #include <linux/dmi.h>
29 
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/slot-gpio.h>
33 
34 #ifdef CONFIG_X86
35 #include <asm/iosf_mbi.h>
36 #endif
37 
38 #include "cqhci.h"
39 
40 #include "sdhci.h"
41 #include "sdhci-pci.h"
42 
43 static void sdhci_pci_hw_reset(struct sdhci_host *host);
44 
45 #ifdef CONFIG_PM_SLEEP
46 static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
47 {
48 	mmc_pm_flag_t pm_flags = 0;
49 	bool cap_cd_wake = false;
50 	int i;
51 
52 	for (i = 0; i < chip->num_slots; i++) {
53 		struct sdhci_pci_slot *slot = chip->slots[i];
54 
55 		if (slot) {
56 			pm_flags |= slot->host->mmc->pm_flags;
57 			if (slot->host->mmc->caps & MMC_CAP_CD_WAKE)
58 				cap_cd_wake = true;
59 		}
60 	}
61 
62 	if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
63 		return device_wakeup_enable(&chip->pdev->dev);
64 	else if (!cap_cd_wake)
65 		return device_wakeup_disable(&chip->pdev->dev);
66 
67 	return 0;
68 }
69 
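/*
 * Suspend every slot in turn, arming card-detect wakeup when the device is
 * allowed to wake the system; on failure, resume the slots already suspended.
 */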
70 static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
71 {
72 	int i, ret;
73 
74 	sdhci_pci_init_wakeup(chip);
75 
76 	for (i = 0; i < chip->num_slots; i++) {
77 		struct sdhci_pci_slot *slot = chip->slots[i];
78 		struct sdhci_host *host;
79 
80 		if (!slot)
81 			continue;
82 
83 		host = slot->host;
84 
85 		if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3)
86 			mmc_retune_needed(host->mmc);
87 
88 		ret = sdhci_suspend_host(host);
89 		if (ret)
90 			goto err_pci_suspend;
91 
92 		if (device_may_wakeup(&chip->pdev->dev))
93 			mmc_gpio_set_cd_wake(host->mmc, true);
94 	}
95 
96 	return 0;
97 
98 err_pci_suspend:
99 	while (--i >= 0)
100 		sdhci_resume_host(chip->slots[i]->host);
101 	return ret;
102 }
103 
104 int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
105 {
106 	struct sdhci_pci_slot *slot;
107 	int i, ret;
108 
109 	for (i = 0; i < chip->num_slots; i++) {
110 		slot = chip->slots[i];
111 		if (!slot)
112 			continue;
113 
114 		ret = sdhci_resume_host(slot->host);
115 		if (ret)
116 			return ret;
117 
118 		mmc_gpio_set_cd_wake(slot->host->mmc, false);
119 	}
120 
121 	return 0;
122 }
123 
124 static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
125 {
126 	int ret;
127 
128 	ret = cqhci_suspend(chip->slots[0]->host->mmc);
129 	if (ret)
130 		return ret;
131 
132 	return sdhci_pci_suspend_host(chip);
133 }
134 
135 static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
136 {
137 	int ret;
138 
139 	ret = sdhci_pci_resume_host(chip);
140 	if (ret)
141 		return ret;
142 
143 	return cqhci_resume(chip->slots[0]->host->mmc);
144 }
145 #endif
146 
147 #ifdef CONFIG_PM
148 static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
149 {
150 	struct sdhci_pci_slot *slot;
151 	struct sdhci_host *host;
152 	int i, ret;
153 
154 	for (i = 0; i < chip->num_slots; i++) {
155 		slot = chip->slots[i];
156 		if (!slot)
157 			continue;
158 
159 		host = slot->host;
160 
161 		ret = sdhci_runtime_suspend_host(host);
162 		if (ret)
163 			goto err_pci_runtime_suspend;
164 
165 		if (chip->rpm_retune &&
166 		    host->tuning_mode != SDHCI_TUNING_MODE_3)
167 			mmc_retune_needed(host->mmc);
168 	}
169 
170 	return 0;
171 
172 err_pci_runtime_suspend:
173 	while (--i >= 0)
174 		sdhci_runtime_resume_host(chip->slots[i]->host, 0);
175 	return ret;
176 }
177 
178 static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
179 {
180 	struct sdhci_pci_slot *slot;
181 	int i, ret;
182 
183 	for (i = 0; i < chip->num_slots; i++) {
184 		slot = chip->slots[i];
185 		if (!slot)
186 			continue;
187 
188 		ret = sdhci_runtime_resume_host(slot->host, 0);
189 		if (ret)
190 			return ret;
191 	}
192 
193 	return 0;
194 }
195 
196 static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
197 {
198 	int ret;
199 
200 	ret = cqhci_suspend(chip->slots[0]->host->mmc);
201 	if (ret)
202 		return ret;
203 
204 	return sdhci_pci_runtime_suspend_host(chip);
205 }
206 
207 static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
208 {
209 	int ret;
210 
211 	ret = sdhci_pci_runtime_resume_host(chip);
212 	if (ret)
213 		return ret;
214 
215 	return cqhci_resume(chip->slots[0]->host->mmc);
216 }
217 #endif
218 
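/*
 * If the interrupt belongs to the command queue engine, hand it to cqhci;
 * otherwise return the unconsumed intmask for normal SDHCI handling.
 */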
219 static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
220 {
221 	int cmd_error = 0;
222 	int data_error = 0;
223 
224 	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
225 		return intmask;
226 
227 	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
228 
229 	return 0;
230 }
231 
232 static void sdhci_pci_dumpregs(struct mmc_host *mmc)
233 {
234 	sdhci_dumpregs(mmc_priv(mmc));
235 }
236 
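/*
 * Deactivate the command queue engine before a full controller reset so
 * that cqhci state stays consistent with the reset hardware.
 */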
237 static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask)
238 {
239 	if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
240 	    host->mmc->cqe_private)
241 		cqhci_deactivate(host->mmc);
242 	sdhci_reset(host, mask);
243 }
244 
245 /*****************************************************************************\
246  *                                                                           *
247  * Hardware specific quirk handling                                          *
248  *                                                                           *
249 \*****************************************************************************/
250 
251 static int ricoh_probe(struct sdhci_pci_chip *chip)
252 {
253 	if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
254 	    chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
255 		chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
256 	return 0;
257 }
258 
259 static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
260 {
261 	slot->host->caps =
262 		FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
263 		FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
264 		SDHCI_TIMEOUT_CLK_UNIT |
265 		SDHCI_CAN_VDD_330 |
266 		SDHCI_CAN_DO_HISPD |
267 		SDHCI_CAN_DO_SDMA;
268 	return 0;
269 }
270 
271 #ifdef CONFIG_PM_SLEEP
272 static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
273 {
274 	/* Apply a delay to allow the controller to settle; otherwise it
275 	 * becomes confused if the card state changed during suspend.
276 	 */
277 	msleep(500);
278 	return sdhci_pci_resume_host(chip);
279 }
280 #endif
281 
282 static const struct sdhci_pci_fixes sdhci_ricoh = {
283 	.probe		= ricoh_probe,
284 	.quirks		= SDHCI_QUIRK_32BIT_DMA_ADDR |
285 			  SDHCI_QUIRK_FORCE_DMA |
286 			  SDHCI_QUIRK_CLOCK_BEFORE_RESET,
287 };
288 
289 static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
290 	.probe_slot	= ricoh_mmc_probe_slot,
291 #ifdef CONFIG_PM_SLEEP
292 	.resume		= ricoh_mmc_resume,
293 #endif
294 	.quirks		= SDHCI_QUIRK_32BIT_DMA_ADDR |
295 			  SDHCI_QUIRK_CLOCK_BEFORE_RESET |
296 			  SDHCI_QUIRK_NO_CARD_NO_RESET |
297 			  SDHCI_QUIRK_MISSING_CAPS
298 };
299 
300 static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
301 {
302 	struct sdhci_host *host = mmc_priv(mmc);
303 
304 	sdhci_set_ios(mmc, ios);
305 
306 	/*
307 	 * Some (ENE) controllers misbehave on some ios operations,
308 	 * signalling timeout and CRC errors even on CMD0. Resetting
309 	 * it on each ios seems to solve the problem.
310 	 */
311 	if (!(host->flags & SDHCI_DEVICE_DEAD))
312 		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
313 }
314 
315 static int ene_714_probe_slot(struct sdhci_pci_slot *slot)
316 {
317 	slot->host->mmc_host_ops.set_ios = ene_714_set_ios;
318 	return 0;
319 }
320 
321 static const struct sdhci_pci_fixes sdhci_ene_712 = {
322 	.quirks		= SDHCI_QUIRK_SINGLE_POWER_WRITE |
323 			  SDHCI_QUIRK_BROKEN_DMA,
324 };
325 
326 static const struct sdhci_pci_fixes sdhci_ene_714 = {
327 	.quirks		= SDHCI_QUIRK_SINGLE_POWER_WRITE |
328 			  SDHCI_QUIRK_BROKEN_DMA,
329 	.probe_slot	= ene_714_probe_slot,
330 };
331 
332 static const struct sdhci_pci_fixes sdhci_cafe = {
333 	.quirks		= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
334 			  SDHCI_QUIRK_NO_BUSY_IRQ |
335 			  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
336 			  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
337 };
338 
339 static const struct sdhci_pci_fixes sdhci_intel_qrk = {
340 	.quirks		= SDHCI_QUIRK_NO_HISPD_BIT,
341 };
342 
343 static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
344 {
345 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
346 	return 0;
347 }
348 
349 /*
350  * ADMA operation is disabled for Moorestown platform due to
351  * hardware bugs.
352  */
353 static int mrst_hc_probe(struct sdhci_pci_chip *chip)
354 {
355 	/*
356 	 * The number of slots is fixed here for MRST as SDIO3/5 are never
357 	 * used and have hardware bugs.
358 	 */
359 	chip->num_slots = 1;
360 	return 0;
361 }
362 
363 static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
364 {
365 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
366 	return 0;
367 }
368 
369 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
370 {
371 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
372 	slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
373 	return 0;
374 }
375 
376 static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
377 {
378 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
379 	return 0;
380 }
381 
382 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
383 	.quirks		= SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
384 	.probe_slot	= mrst_hc_probe_slot,
385 };
386 
387 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
388 	.quirks		= SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
389 	.probe		= mrst_hc_probe,
390 };
391 
392 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
393 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
394 	.allow_runtime_pm = true,
395 	.own_cd_for_runtime_pm = true,
396 };
397 
398 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
399 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
400 	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON,
401 	.allow_runtime_pm = true,
402 	.probe_slot	= mfd_sdio_probe_slot,
403 };
404 
405 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
406 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
407 	.allow_runtime_pm = true,
408 	.probe_slot	= mfd_emmc_probe_slot,
409 };
410 
411 static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
412 	.quirks		= SDHCI_QUIRK_BROKEN_ADMA,
413 	.probe_slot	= pch_hc_probe_slot,
414 };
415 
416 #ifdef CONFIG_X86
417 
418 #define BYT_IOSF_SCCEP			0x63
419 #define BYT_IOSF_OCP_NETCTRL0		0x1078
420 #define BYT_IOSF_OCP_TIMEOUT_BASE	GENMASK(10, 8)
421 
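/*
 * Clear the OCP timeout base field in the Bay Trail IOSF sideband register
 * if it is set, leaving the other NETCTRL0 bits untouched.
 */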
422 static void byt_ocp_setting(struct pci_dev *pdev)
423 {
424 	u32 val = 0;
425 
426 	if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
427 	    pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
428 	    pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
429 	    pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
430 		return;
431 
432 	if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
433 			  &val)) {
434 		dev_err(&pdev->dev, "%s read error\n", __func__);
435 		return;
436 	}
437 
438 	if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
439 		return;
440 
441 	val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
442 
443 	if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
444 			   val)) {
445 		dev_err(&pdev->dev, "%s write error\n", __func__);
446 		return;
447 	}
448 
449 	dev_dbg(&pdev->dev, "%s completed\n", __func__);
450 }
451 
452 #else
453 
454 static inline void byt_ocp_setting(struct pci_dev *pdev)
455 {
456 }
457 
458 #endif
459 
460 enum {
461 	INTEL_DSM_FNS		=  0,
462 	INTEL_DSM_V18_SWITCH	=  3,
463 	INTEL_DSM_V33_SWITCH	=  4,
464 	INTEL_DSM_DRV_STRENGTH	=  9,
465 	INTEL_DSM_D3_RETUNE	= 10,
466 };
467 
468 struct intel_host {
469 	u32	dsm_fns;
470 	int	drv_strength;
471 	bool	d3_retune;
472 	bool	rpm_retune_ok;
473 	bool	needs_pwr_off;
474 	u32	glk_rx_ctrl1;
475 	u32	glk_tun_val;
476 	u32	active_ltr;
477 	u32	idle_ltr;
478 };
479 
480 static const guid_t intel_dsm_guid =
481 	GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
482 		  0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
483 
484 static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
485 		       unsigned int fn, u32 *result)
486 {
487 	union acpi_object *obj;
488 	int err = 0;
489 	size_t len;
490 
491 	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
492 	if (!obj)
493 		return -EOPNOTSUPP;
494 
495 	if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
496 		err = -EINVAL;
497 		goto out;
498 	}
499 
500 	len = min_t(size_t, obj->buffer.length, 4);
501 
502 	*result = 0;
503 	memcpy(result, obj->buffer.pointer, len);
504 out:
505 	ACPI_FREE(obj);
506 
507 	return err;
508 }
509 
510 static int intel_dsm(struct intel_host *intel_host, struct device *dev,
511 		     unsigned int fn, u32 *result)
512 {
513 	if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
514 		return -EOPNOTSUPP;
515 
516 	return __intel_dsm(intel_host, dev, fn, result);
517 }
518 
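/*
 * Read the DSM function mask and cache the ACPI-provided driver strength
 * and D3 re-tune settings, falling back to safe defaults on error.
 */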
519 static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
520 			   struct mmc_host *mmc)
521 {
522 	int err;
523 	u32 val;
524 
525 	intel_host->d3_retune = true;
526 
527 	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
528 	if (err) {
529 		pr_debug("%s: DSM not supported, error %d\n",
530 			 mmc_hostname(mmc), err);
531 		return;
532 	}
533 
534 	pr_debug("%s: DSM function mask %#x\n",
535 		 mmc_hostname(mmc), intel_host->dsm_fns);
536 
537 	err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val);
538 	intel_host->drv_strength = err ? 0 : val;
539 
540 	err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val);
541 	intel_host->d3_retune = err ? true : !!val;
542 }
543 
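/*
 * Toggle the vendor-defined hardware reset bit (bit 4) in the power control
 * register to issue an eMMC hardware reset.
 */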
544 static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
545 {
546 	u8 reg;
547 
548 	reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
549 	reg |= 0x10;
550 	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
551 	/* For eMMC, minimum is 1us but give it 9us for good measure */
552 	udelay(9);
553 	reg &= ~0x10;
554 	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
555 	/* For eMMC, minimum is 200us but give it 300us for good measure */
556 	usleep_range(300, 1000);
557 }
558 
559 static int intel_select_drive_strength(struct mmc_card *card,
560 				       unsigned int max_dtr, int host_drv,
561 				       int card_drv, int *drv_type)
562 {
563 	struct sdhci_host *host = mmc_priv(card->host);
564 	struct sdhci_pci_slot *slot = sdhci_priv(host);
565 	struct intel_host *intel_host = sdhci_pci_priv(slot);
566 
567 	if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
568 		return 0;
569 
570 	return intel_host->drv_strength;
571 }
572 
573 static int bxt_get_cd(struct mmc_host *mmc)
574 {
575 	int gpio_cd = mmc_gpio_get_cd(mmc);
576 
577 	if (!gpio_cd)
578 		return 0;
579 
580 	return sdhci_get_cd_nogpio(mmc);
581 }
582 
583 static int mrfld_get_cd(struct mmc_host *mmc)
584 {
585 	return sdhci_get_cd_nogpio(mmc);
586 }
587 
588 #define SDHCI_INTEL_PWR_TIMEOUT_CNT	20
589 #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY	100
590 
591 static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
592 				  unsigned short vdd)
593 {
594 	struct sdhci_pci_slot *slot = sdhci_priv(host);
595 	struct intel_host *intel_host = sdhci_pci_priv(slot);
596 	int cntr;
597 	u8 reg;
598 
599 	/*
600 	 * Bus power may control card power, but a full reset still may not
601 	 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
602 	 * That might be needed to initialize correctly, if the card was left
603 	 * powered on previously.
604 	 */
605 	if (intel_host->needs_pwr_off) {
606 		intel_host->needs_pwr_off = false;
607 		if (mode != MMC_POWER_OFF) {
608 			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
609 			usleep_range(10000, 12500);
610 		}
611 	}
612 
613 	sdhci_set_power(host, mode, vdd);
614 
615 	if (mode == MMC_POWER_OFF)
616 		return;
617 
618 	/*
619 	 * Bus power might not enable after D3 -> D0 transition due to the
620 	 * present state not yet having propagated. Retry for up to 2ms.
621 	 */
622 	for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
623 		reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
624 		if (reg & SDHCI_POWER_ON)
625 			break;
626 		udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
627 		reg |= SDHCI_POWER_ON;
628 		sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
629 	}
630 }
631 
632 static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
633 					  unsigned int timing)
634 {
635 	/* Set UHS timing to SDR25 for High Speed mode */
636 	if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
637 		timing = MMC_TIMING_UHS_SDR25;
638 	sdhci_set_uhs_signaling(host, timing);
639 }
640 
641 #define INTEL_HS400_ES_REG 0x78
642 #define INTEL_HS400_ES_BIT BIT(0)
643 
644 static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
645 					struct mmc_ios *ios)
646 {
647 	struct sdhci_host *host = mmc_priv(mmc);
648 	u32 val;
649 
650 	val = sdhci_readl(host, INTEL_HS400_ES_REG);
651 	if (ios->enhanced_strobe)
652 		val |= INTEL_HS400_ES_BIT;
653 	else
654 		val &= ~INTEL_HS400_ES_BIT;
655 	sdhci_writel(host, val, INTEL_HS400_ES_REG);
656 }
657 
658 static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
659 					     struct mmc_ios *ios)
660 {
661 	struct device *dev = mmc_dev(mmc);
662 	struct sdhci_host *host = mmc_priv(mmc);
663 	struct sdhci_pci_slot *slot = sdhci_priv(host);
664 	struct intel_host *intel_host = sdhci_pci_priv(slot);
665 	unsigned int fn;
666 	u32 result = 0;
667 	int err;
668 
669 	err = sdhci_start_signal_voltage_switch(mmc, ios);
670 	if (err)
671 		return err;
672 
673 	switch (ios->signal_voltage) {
674 	case MMC_SIGNAL_VOLTAGE_330:
675 		fn = INTEL_DSM_V33_SWITCH;
676 		break;
677 	case MMC_SIGNAL_VOLTAGE_180:
678 		fn = INTEL_DSM_V18_SWITCH;
679 		break;
680 	default:
681 		return 0;
682 	}
683 
684 	err = intel_dsm(intel_host, dev, fn, &result);
685 	pr_debug("%s: %s DSM fn %u error %d result %u\n",
686 		 mmc_hostname(mmc), __func__, fn, err, result);
687 
688 	return 0;
689 }
690 
691 static const struct sdhci_ops sdhci_intel_byt_ops = {
692 	.set_clock		= sdhci_set_clock,
693 	.set_power		= sdhci_intel_set_power,
694 	.enable_dma		= sdhci_pci_enable_dma,
695 	.set_bus_width		= sdhci_set_bus_width,
696 	.reset			= sdhci_reset,
697 	.set_uhs_signaling	= sdhci_intel_set_uhs_signaling,
698 	.hw_reset		= sdhci_pci_hw_reset,
699 };
700 
701 static const struct sdhci_ops sdhci_intel_glk_ops = {
702 	.set_clock		= sdhci_set_clock,
703 	.set_power		= sdhci_intel_set_power,
704 	.enable_dma		= sdhci_pci_enable_dma,
705 	.set_bus_width		= sdhci_set_bus_width,
706 	.reset			= sdhci_cqhci_reset,
707 	.set_uhs_signaling	= sdhci_intel_set_uhs_signaling,
708 	.hw_reset		= sdhci_pci_hw_reset,
709 	.irq			= sdhci_cqhci_irq,
710 };
711 
712 static void byt_read_dsm(struct sdhci_pci_slot *slot)
713 {
714 	struct intel_host *intel_host = sdhci_pci_priv(slot);
715 	struct device *dev = &slot->chip->pdev->dev;
716 	struct mmc_host *mmc = slot->host->mmc;
717 
718 	intel_dsm_init(intel_host, dev, mmc);
719 	slot->chip->rpm_retune = intel_host->d3_retune;
720 }
721 
722 static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
723 {
724 	int err = sdhci_execute_tuning(mmc, opcode);
725 	struct sdhci_host *host = mmc_priv(mmc);
726 
727 	if (err)
728 		return err;
729 
730 	/*
731 	 * Tuning can leave the IP in an active state (Buffer Read Enable bit
732 	 * set) which prevents the entry to low power states (i.e. S0i3). Data
733 	 * reset will clear it.
734 	 */
735 	sdhci_reset(host, SDHCI_RESET_DATA);
736 
737 	return 0;
738 }
739 
740 #define INTEL_ACTIVELTR		0x804
741 #define INTEL_IDLELTR		0x808
742 
743 #define INTEL_LTR_REQ		BIT(15)
744 #define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
745 #define INTEL_LTR_SCALE_1US	(2 << 10)
746 #define INTEL_LTR_SCALE_32US	(3 << 10)
747 #define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)
748 
749 static void intel_cache_ltr(struct sdhci_pci_slot *slot)
750 {
751 	struct intel_host *intel_host = sdhci_pci_priv(slot);
752 	struct sdhci_host *host = slot->host;
753 
754 	intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
755 	intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
756 }
757 
758 static void intel_ltr_set(struct device *dev, s32 val)
759 {
760 	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
761 	struct sdhci_pci_slot *slot = chip->slots[0];
762 	struct intel_host *intel_host = sdhci_pci_priv(slot);
763 	struct sdhci_host *host = slot->host;
764 	u32 ltr;
765 
766 	pm_runtime_get_sync(dev);
767 
768 	/*
769 	 * Program the latency tolerance (LTR) according to what has been
770 	 * requested by the PM QoS layer, or disable it if we were passed a
771 	 * negative value or PM_QOS_LATENCY_ANY.
772 	 */
773 	ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
774 
775 	if (val == PM_QOS_LATENCY_ANY || val < 0) {
776 		ltr &= ~INTEL_LTR_REQ;
777 	} else {
778 		ltr |= INTEL_LTR_REQ;
779 		ltr &= ~INTEL_LTR_SCALE_MASK;
780 		ltr &= ~INTEL_LTR_VALUE_MASK;
781 
782 		if (val > INTEL_LTR_VALUE_MASK) {
783 			val >>= 5;
784 			if (val > INTEL_LTR_VALUE_MASK)
785 				val = INTEL_LTR_VALUE_MASK;
786 			ltr |= INTEL_LTR_SCALE_32US | val;
787 		} else {
788 			ltr |= INTEL_LTR_SCALE_1US | val;
789 		}
790 	}
791 
792 	if (ltr == intel_host->active_ltr)
793 		goto out;
794 
795 	writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
796 	writel(ltr, host->ioaddr + INTEL_IDLELTR);
797 
798 	/* Cache the values into lpss structure */
799 	intel_cache_ltr(slot);
800 out:
801 	pm_runtime_put_autosuspend(dev);
802 }
803 
804 static bool intel_use_ltr(struct sdhci_pci_chip *chip)
805 {
806 	switch (chip->pdev->device) {
807 	case PCI_DEVICE_ID_INTEL_BYT_EMMC:
808 	case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
809 	case PCI_DEVICE_ID_INTEL_BYT_SDIO:
810 	case PCI_DEVICE_ID_INTEL_BYT_SD:
811 	case PCI_DEVICE_ID_INTEL_BSW_EMMC:
812 	case PCI_DEVICE_ID_INTEL_BSW_SDIO:
813 	case PCI_DEVICE_ID_INTEL_BSW_SD:
814 		return false;
815 	default:
816 		return true;
817 	}
818 }
819 
820 static void intel_ltr_expose(struct sdhci_pci_chip *chip)
821 {
822 	struct device *dev = &chip->pdev->dev;
823 
824 	if (!intel_use_ltr(chip))
825 		return;
826 
827 	dev->power.set_latency_tolerance = intel_ltr_set;
828 	dev_pm_qos_expose_latency_tolerance(dev);
829 }
830 
831 static void intel_ltr_hide(struct sdhci_pci_chip *chip)
832 {
833 	struct device *dev = &chip->pdev->dev;
834 
835 	if (!intel_use_ltr(chip))
836 		return;
837 
838 	dev_pm_qos_hide_latency_tolerance(dev);
839 	dev->power.set_latency_tolerance = NULL;
840 }
841 
842 static void byt_probe_slot(struct sdhci_pci_slot *slot)
843 {
844 	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
845 	struct device *dev = &slot->chip->pdev->dev;
846 	struct mmc_host *mmc = slot->host->mmc;
847 
848 	byt_read_dsm(slot);
849 
850 	byt_ocp_setting(slot->chip->pdev);
851 
852 	ops->execute_tuning = intel_execute_tuning;
853 	ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
854 
855 	device_property_read_u32(dev, "max-frequency", &mmc->f_max);
856 
857 	if (!mmc->slotno) {
858 		slot->chip->slots[mmc->slotno] = slot;
859 		intel_ltr_expose(slot->chip);
860 	}
861 }
862 
863 static void byt_add_debugfs(struct sdhci_pci_slot *slot)
864 {
865 	struct intel_host *intel_host = sdhci_pci_priv(slot);
866 	struct mmc_host *mmc = slot->host->mmc;
867 	struct dentry *dir = mmc->debugfs_root;
868 
869 	if (!intel_use_ltr(slot->chip))
870 		return;
871 
872 	debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
873 	debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);
874 
875 	intel_cache_ltr(slot);
876 }
877 
878 static int byt_add_host(struct sdhci_pci_slot *slot)
879 {
880 	int ret = sdhci_add_host(slot->host);
881 
882 	if (!ret)
883 		byt_add_debugfs(slot);
884 	return ret;
885 }
886 
887 static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
888 {
889 	struct mmc_host *mmc = slot->host->mmc;
890 
891 	if (!mmc->slotno)
892 		intel_ltr_hide(slot->chip);
893 }
894 
895 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
896 {
897 	byt_probe_slot(slot);
898 	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
899 				 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
900 				 MMC_CAP_CMD_DURING_TFR |
901 				 MMC_CAP_WAIT_WHILE_BUSY;
902 	slot->hw_reset = sdhci_pci_int_hw_reset;
903 	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
904 		slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
905 	slot->host->mmc_host_ops.select_drive_strength =
906 						intel_select_drive_strength;
907 	return 0;
908 }
909 
910 static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
911 {
912 	return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
913 	       (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
914 		dmi_match(DMI_SYS_VENDOR, "IRBIS"));
915 }
916 
917 static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
918 {
919 	return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
920 			dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
921 }
922 
923 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
924 {
925 	int ret = byt_emmc_probe_slot(slot);
926 
927 	if (!glk_broken_cqhci(slot))
928 		slot->host->mmc->caps2 |= MMC_CAP2_CQE;
929 
930 	if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
931 		if (!jsl_broken_hs400es(slot)) {
932 			slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
933 			slot->host->mmc_host_ops.hs400_enhanced_strobe =
934 							intel_hs400_enhanced_strobe;
935 		}
936 		slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
937 	}
938 
939 	return ret;
940 }
941 
942 static const struct cqhci_host_ops glk_cqhci_ops = {
943 	.enable		= sdhci_cqe_enable,
944 	.disable	= sdhci_cqe_disable,
945 	.dumpregs	= sdhci_pci_dumpregs,
946 };
947 
948 static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
949 {
950 	struct device *dev = &slot->chip->pdev->dev;
951 	struct sdhci_host *host = slot->host;
952 	struct cqhci_host *cq_host;
953 	bool dma64;
954 	int ret;
955 
956 	ret = sdhci_setup_host(host);
957 	if (ret)
958 		return ret;
959 
960 	cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
961 	if (!cq_host) {
962 		ret = -ENOMEM;
963 		goto cleanup;
964 	}
965 
966 	cq_host->mmio = host->ioaddr + 0x200;
967 	cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
968 	cq_host->ops = &glk_cqhci_ops;
969 
970 	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
971 	if (dma64)
972 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
973 
974 	ret = cqhci_init(cq_host, host->mmc, dma64);
975 	if (ret)
976 		goto cleanup;
977 
978 	ret = __sdhci_add_host(host);
979 	if (ret)
980 		goto cleanup;
981 
982 	byt_add_debugfs(slot);
983 
984 	return 0;
985 
986 cleanup:
987 	sdhci_cleanup_host(host);
988 	return ret;
989 }
990 
991 #ifdef CONFIG_PM
992 #define GLK_RX_CTRL1	0x834
993 #define GLK_TUN_VAL	0x840
994 #define GLK_PATH_PLL	GENMASK(13, 8)
995 #define GLK_DLY		GENMASK(6, 0)
996 /* Workaround firmware failing to restore the tuning value */
997 static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
998 {
999 	struct sdhci_pci_slot *slot = chip->slots[0];
1000 	struct intel_host *intel_host = sdhci_pci_priv(slot);
1001 	struct sdhci_host *host = slot->host;
1002 	u32 glk_rx_ctrl1;
1003 	u32 glk_tun_val;
1004 	u32 dly;
1005 
1006 	if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
1007 		return;
1008 
1009 	glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
1010 	glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
1011 
1012 	if (susp) {
1013 		intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
1014 		intel_host->glk_tun_val = glk_tun_val;
1015 		return;
1016 	}
1017 
1018 	if (!intel_host->glk_tun_val)
1019 		return;
1020 
1021 	if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
1022 		intel_host->rpm_retune_ok = true;
1023 		return;
1024 	}
1025 
1026 	dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
1027 				  (intel_host->glk_tun_val << 1));
1028 	if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
1029 		return;
1030 
1031 	glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
1032 	sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
1033 
1034 	intel_host->rpm_retune_ok = true;
1035 	chip->rpm_retune = true;
1036 	mmc_retune_needed(host->mmc);
1037 	pr_info("%s: Requiring re-tune after rpm resume\n", mmc_hostname(host->mmc));
1038 }
1039 
1040 static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
1041 {
1042 	if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
1043 	    !chip->rpm_retune)
1044 		glk_rpm_retune_wa(chip, susp);
1045 }
1046 
1047 static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
1048 {
1049 	glk_rpm_retune_chk(chip, true);
1050 
1051 	return sdhci_cqhci_runtime_suspend(chip);
1052 }
1053 
1054 static int glk_runtime_resume(struct sdhci_pci_chip *chip)
1055 {
1056 	glk_rpm_retune_chk(chip, false);
1057 
1058 	return sdhci_cqhci_runtime_resume(chip);
1059 }
1060 #endif
1061 
1062 #ifdef CONFIG_ACPI
1063 static int ni_set_max_freq(struct sdhci_pci_slot *slot)
1064 {
1065 	acpi_status status;
1066 	unsigned long long max_freq;
1067 
1068 	status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
1069 				       "MXFQ", NULL, &max_freq);
1070 	if (ACPI_FAILURE(status)) {
1071 		dev_err(&slot->chip->pdev->dev,
1072 			"MXFQ not found in acpi table\n");
1073 		return -EINVAL;
1074 	}
1075 
1076 	slot->host->mmc->f_max = max_freq * 1000000;
1077 
1078 	return 0;
1079 }
1080 #else
1081 static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
1082 {
1083 	return 0;
1084 }
1085 #endif
1086 
1087 static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1088 {
1089 	int err;
1090 
1091 	byt_probe_slot(slot);
1092 
1093 	err = ni_set_max_freq(slot);
1094 	if (err)
1095 		return err;
1096 
1097 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1098 				 MMC_CAP_WAIT_WHILE_BUSY;
1099 	return 0;
1100 }
1101 
1102 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1103 {
1104 	byt_probe_slot(slot);
1105 	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1106 				 MMC_CAP_WAIT_WHILE_BUSY;
1107 	return 0;
1108 }
1109 
1110 static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
1111 {
1112 	struct intel_host *intel_host = sdhci_pci_priv(slot);
1113 	u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
1114 
1115 	intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
1116 }
1117 
1118 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
1119 {
1120 	byt_probe_slot(slot);
1121 	slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
1122 				 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
1123 	slot->cd_idx = 0;
1124 	slot->cd_override_level = true;
1125 	if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
1126 	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
1127 	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
1128 	    slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD)
1129 		slot->host->mmc_host_ops.get_cd = bxt_get_cd;
1130 
1131 	if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI &&
1132 	    slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
1133 		slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
1134 
1135 	byt_needs_pwr_off(slot);
1136 
1137 	return 0;
1138 }
1139 
1140 #ifdef CONFIG_PM_SLEEP
1141 
1142 static int byt_resume(struct sdhci_pci_chip *chip)
1143 {
1144 	byt_ocp_setting(chip->pdev);
1145 
1146 	return sdhci_pci_resume_host(chip);
1147 }
1148 
1149 #endif
1150 
1151 #ifdef CONFIG_PM
1152 
1153 static int byt_runtime_resume(struct sdhci_pci_chip *chip)
1154 {
1155 	byt_ocp_setting(chip->pdev);
1156 
1157 	return sdhci_pci_runtime_resume_host(chip);
1158 }
1159 
1160 #endif
1161 
1162 static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
1163 #ifdef CONFIG_PM_SLEEP
1164 	.resume		= byt_resume,
1165 #endif
1166 #ifdef CONFIG_PM
1167 	.runtime_resume	= byt_runtime_resume,
1168 #endif
1169 	.allow_runtime_pm = true,
1170 	.probe_slot	= byt_emmc_probe_slot,
1171 	.add_host	= byt_add_host,
1172 	.remove_slot	= byt_remove_slot,
1173 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1174 			  SDHCI_QUIRK_NO_LED,
1175 	.quirks2	= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1176 			  SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1177 			  SDHCI_QUIRK2_STOP_WITH_TC,
1178 	.ops		= &sdhci_intel_byt_ops,
1179 	.priv_size	= sizeof(struct intel_host),
1180 };
1181 
1182 static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
1183 	.allow_runtime_pm	= true,
1184 	.probe_slot		= glk_emmc_probe_slot,
1185 	.add_host		= glk_emmc_add_host,
1186 	.remove_slot		= byt_remove_slot,
1187 #ifdef CONFIG_PM_SLEEP
1188 	.suspend		= sdhci_cqhci_suspend,
1189 	.resume			= sdhci_cqhci_resume,
1190 #endif
1191 #ifdef CONFIG_PM
1192 	.runtime_suspend	= glk_runtime_suspend,
1193 	.runtime_resume		= glk_runtime_resume,
1194 #endif
1195 	.quirks			= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1196 				  SDHCI_QUIRK_NO_LED,
1197 	.quirks2		= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1198 				  SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1199 				  SDHCI_QUIRK2_STOP_WITH_TC,
1200 	.ops			= &sdhci_intel_glk_ops,
1201 	.priv_size		= sizeof(struct intel_host),
1202 };
1203 
1204 static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
1205 #ifdef CONFIG_PM_SLEEP
1206 	.resume		= byt_resume,
1207 #endif
1208 #ifdef CONFIG_PM
1209 	.runtime_resume	= byt_runtime_resume,
1210 #endif
1211 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1212 			  SDHCI_QUIRK_NO_LED,
1213 	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1214 			  SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1215 	.allow_runtime_pm = true,
1216 	.probe_slot	= ni_byt_sdio_probe_slot,
1217 	.add_host	= byt_add_host,
1218 	.remove_slot	= byt_remove_slot,
1219 	.ops		= &sdhci_intel_byt_ops,
1220 	.priv_size	= sizeof(struct intel_host),
1221 };
1222 
1223 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
1224 #ifdef CONFIG_PM_SLEEP
1225 	.resume		= byt_resume,
1226 #endif
1227 #ifdef CONFIG_PM
1228 	.runtime_resume	= byt_runtime_resume,
1229 #endif
1230 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1231 			  SDHCI_QUIRK_NO_LED,
1232 	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1233 			SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1234 	.allow_runtime_pm = true,
1235 	.probe_slot	= byt_sdio_probe_slot,
1236 	.add_host	= byt_add_host,
1237 	.remove_slot	= byt_remove_slot,
1238 	.ops		= &sdhci_intel_byt_ops,
1239 	.priv_size	= sizeof(struct intel_host),
1240 };
1241 
1242 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
1243 #ifdef CONFIG_PM_SLEEP
1244 	.resume		= byt_resume,
1245 #endif
1246 #ifdef CONFIG_PM
1247 	.runtime_resume	= byt_runtime_resume,
1248 #endif
1249 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1250 			  SDHCI_QUIRK_NO_LED,
1251 	.quirks2	= SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
1252 			  SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1253 			  SDHCI_QUIRK2_STOP_WITH_TC,
1254 	.allow_runtime_pm = true,
1255 	.own_cd_for_runtime_pm = true,
1256 	.probe_slot	= byt_sd_probe_slot,
1257 	.add_host	= byt_add_host,
1258 	.remove_slot	= byt_remove_slot,
1259 	.ops		= &sdhci_intel_byt_ops,
1260 	.priv_size	= sizeof(struct intel_host),
1261 };
1262 
1263 /* Define Host controllers for Intel Merrifield platform */
1264 #define INTEL_MRFLD_EMMC_0	0
1265 #define INTEL_MRFLD_EMMC_1	1
1266 #define INTEL_MRFLD_SD		2
1267 #define INTEL_MRFLD_SDIO	3
1268 
1269 #ifdef CONFIG_ACPI
1270 static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
1271 {
1272 	struct acpi_device *device;
1273 
1274 	device = ACPI_COMPANION(&slot->chip->pdev->dev);
1275 	if (device)
1276 		acpi_device_fix_up_power_extended(device);
1277 }
1278 #else
1279 static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
1280 #endif
1281 
1282 static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
1283 {
1284 	unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);
1285 
1286 	switch (func) {
1287 	case INTEL_MRFLD_EMMC_0:
1288 	case INTEL_MRFLD_EMMC_1:
1289 		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1290 					 MMC_CAP_8_BIT_DATA |
1291 					 MMC_CAP_1_8V_DDR;
1292 		break;
1293 	case INTEL_MRFLD_SD:
1294 		slot->cd_idx = 0;
1295 		slot->cd_override_level = true;
1296 		/*
1297 		 * There are two PCB designs of SD card slot with the opposite
1298 		 * card detection sense. Quirk this out by ignoring GPIO state
1299 		 * completely in the custom ->get_cd() callback.
1300 		 */
1301 		slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
1302 		slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1303 		break;
1304 	case INTEL_MRFLD_SDIO:
1305 		/* Advertise 2.0v for compatibility with the SDIO card's OCR */
1306 		slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
1307 		slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1308 					 MMC_CAP_POWER_OFF_CARD;
1309 		break;
1310 	default:
1311 		return -ENODEV;
1312 	}
1313 
1314 	intel_mrfld_mmc_fix_up_power_slot(slot);
1315 	return 0;
1316 }
1317 
1318 static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
1319 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1320 	.quirks2	= SDHCI_QUIRK2_BROKEN_HS200 |
1321 			SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1322 	.allow_runtime_pm = true,
1323 	.probe_slot	= intel_mrfld_mmc_probe_slot,
1324 };
1325 
1326 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
1327 {
1328 	u8 scratch;
1329 	int ret;
1330 
1331 	ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
1332 	if (ret)
1333 		return ret;
1334 
1335 	/*
1336 	 * Turn PMOS on [bit 0], set over current detection to 2.4 V
1337 	 * [bit 1:2] and enable over current debouncing [bit 6].
1338 	 */
1339 	if (on)
1340 		scratch |= 0x47;
1341 	else
1342 		scratch &= ~0x47;
1343 
1344 	return pci_write_config_byte(chip->pdev, 0xAE, scratch);
1345 }
1346 
1347 static int jmicron_probe(struct sdhci_pci_chip *chip)
1348 {
1349 	int ret;
1350 	u16 mmcdev = 0;
1351 
1352 	if (chip->pdev->revision == 0) {
1353 		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
1354 			  SDHCI_QUIRK_32BIT_DMA_SIZE |
1355 			  SDHCI_QUIRK_32BIT_ADMA_SIZE |
1356 			  SDHCI_QUIRK_RESET_AFTER_REQUEST |
1357 			  SDHCI_QUIRK_BROKEN_SMALL_PIO;
1358 	}
1359 
1360 	/*
1361 	 * JMicron chips can have two interfaces to the same hardware
1362 	 * in order to work around limitations in Microsoft's driver.
1363 	 * We need to make sure we only bind to one of them.
1364 	 *
1365 	 * This code assumes two things:
1366 	 *
1367 	 * 1. The PCI code adds subfunctions in order.
1368 	 *
1369 	 * 2. The MMC interface has a lower subfunction number
1370 	 *    than the SD interface.
1371 	 */
1372 	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
1373 		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
1374 	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
1375 		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
1376 
1377 	if (mmcdev) {
1378 		struct pci_dev *sd_dev;
1379 
1380 		sd_dev = NULL;
1381 		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
1382 						mmcdev, sd_dev)) != NULL) {
1383 			if ((PCI_SLOT(chip->pdev->devfn) ==
1384 				PCI_SLOT(sd_dev->devfn)) &&
1385 				(chip->pdev->bus == sd_dev->bus))
1386 				break;
1387 		}
1388 
1389 		if (sd_dev) {
1390 			pci_dev_put(sd_dev);
1391 			dev_info(&chip->pdev->dev, "Refusing to bind to "
1392 				"secondary interface.\n");
1393 			return -ENODEV;
1394 		}
1395 	}
1396 
1397 	/*
1398 	 * JMicron chips need a bit of a nudge to enable the power
1399 	 * output pins.
1400 	 */
1401 	ret = jmicron_pmos(chip, 1);
1402 	if (ret) {
1403 		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1404 		return ret;
1405 	}
1406 
1407 	/* Quirk for unstable RO detection on JM388 chips */
1408 	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
1409 	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1410 		chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
1411 
1412 	return 0;
1413 }
1414 
1415 static void jmicron_enable_mmc(struct sdhci_host *host, int on)
1416 {
1417 	u8 scratch;
1418 
1419 	scratch = readb(host->ioaddr + 0xC0);
1420 
1421 	if (on)
1422 		scratch |= 0x01;
1423 	else
1424 		scratch &= ~0x01;
1425 
1426 	writeb(scratch, host->ioaddr + 0xC0);
1427 }
1428 
1429 static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
1430 {
1431 	if (slot->chip->pdev->revision == 0) {
1432 		u16 version;
1433 
1434 		version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
1435 		version = (version & SDHCI_VENDOR_VER_MASK) >>
1436 			SDHCI_VENDOR_VER_SHIFT;
1437 
1438 		/*
1439 		 * Older versions of the chip have lots of nasty glitches
1440 		 * in the ADMA engine. It's best just to avoid it
1441 		 * completely.
1442 		 */
1443 		if (version < 0xAC)
1444 			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1445 	}
1446 
1447 	/* JM388 MMC doesn't support 1.8V while SD supports it */
1448 	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1449 		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
1450 			MMC_VDD_29_30 | MMC_VDD_30_31 |
1451 			MMC_VDD_165_195; /* allow 1.8V */
1452 		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
1453 			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
1454 	}
1455 
1456 	/*
1457 	 * The secondary interface requires a bit set to get the
1458 	 * interrupts.
1459 	 */
1460 	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1461 	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1462 		jmicron_enable_mmc(slot->host, 1);
1463 
1464 	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
1465 
1466 	return 0;
1467 }
1468 
1469 static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
1470 {
1471 	if (dead)
1472 		return;
1473 
1474 	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1475 	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1476 		jmicron_enable_mmc(slot->host, 0);
1477 }
1478 
1479 #ifdef CONFIG_PM_SLEEP
1480 static int jmicron_suspend(struct sdhci_pci_chip *chip)
1481 {
1482 	int i, ret;
1483 
1484 	ret = sdhci_pci_suspend_host(chip);
1485 	if (ret)
1486 		return ret;
1487 
1488 	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1489 	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1490 		for (i = 0; i < chip->num_slots; i++)
1491 			jmicron_enable_mmc(chip->slots[i]->host, 0);
1492 	}
1493 
1494 	return 0;
1495 }
1496 
1497 static int jmicron_resume(struct sdhci_pci_chip *chip)
1498 {
1499 	int ret, i;
1500 
1501 	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1502 	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1503 		for (i = 0; i < chip->num_slots; i++)
1504 			jmicron_enable_mmc(chip->slots[i]->host, 1);
1505 	}
1506 
1507 	ret = jmicron_pmos(chip, 1);
1508 	if (ret) {
1509 		dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1510 		return ret;
1511 	}
1512 
1513 	return sdhci_pci_resume_host(chip);
1514 }
1515 #endif
1516 
1517 static const struct sdhci_pci_fixes sdhci_jmicron = {
1518 	.probe		= jmicron_probe,
1519 
1520 	.probe_slot	= jmicron_probe_slot,
1521 	.remove_slot	= jmicron_remove_slot,
1522 
1523 #ifdef CONFIG_PM_SLEEP
1524 	.suspend	= jmicron_suspend,
1525 	.resume		= jmicron_resume,
1526 #endif
1527 };
1528 
1529 /* SysKonnect CardBus2SDIO extra registers */
1530 #define SYSKT_CTRL		0x200
1531 #define SYSKT_RDFIFO_STAT	0x204
1532 #define SYSKT_WRFIFO_STAT	0x208
1533 #define SYSKT_POWER_DATA	0x20c
1534 #define   SYSKT_POWER_330	0xef
1535 #define   SYSKT_POWER_300	0xf8
1536 #define   SYSKT_POWER_184	0xcc
1537 #define SYSKT_POWER_CMD		0x20d
1538 #define   SYSKT_POWER_START	(1 << 7)
1539 #define SYSKT_POWER_STATUS	0x20e
1540 #define   SYSKT_POWER_STATUS_OK	(1 << 0)
1541 #define SYSKT_BOARD_REV		0x210
1542 #define SYSKT_CHIP_REV		0x211
1543 #define SYSKT_CONF_DATA		0x212
1544 #define   SYSKT_CONF_DATA_1V8	(1 << 2)
1545 #define   SYSKT_CONF_DATA_2V5	(1 << 1)
1546 #define   SYSKT_CONF_DATA_3V3	(1 << 0)
1547 
1548 static int syskt_probe(struct sdhci_pci_chip *chip)
1549 {
1550 	if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
1551 		chip->pdev->class &= ~0x0000FF;
1552 		chip->pdev->class |= PCI_SDHCI_IFDMA;
1553 	}
1554 	return 0;
1555 }
1556 
1557 static int syskt_probe_slot(struct sdhci_pci_slot *slot)
1558 {
1559 	int tm, ps;
1560 
1561 	u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
1562 	u8  chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
1563 	dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
1564 					 "board rev %d.%d, chip rev %d.%d\n",
1565 					 board_rev >> 4, board_rev & 0xf,
1566 					 chip_rev >> 4,  chip_rev & 0xf);
1567 	if (chip_rev >= 0x20)
1568 		slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
1569 
1570 	writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
1571 	writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
1572 	udelay(50);
1573 	tm = 10;  /* Wait max 1 ms */
1574 	do {
1575 		ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
1576 		if (ps & SYSKT_POWER_STATUS_OK)
1577 			break;
1578 		udelay(100);
1579 	} while (--tm);
1580 	if (!tm) {
1581 		dev_err(&slot->chip->pdev->dev,
1582 			"power regulator never stabilized");
1583 		writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
1584 		return -ENODEV;
1585 	}
1586 
1587 	return 0;
1588 }
1589 
1590 static const struct sdhci_pci_fixes sdhci_syskt = {
1591 	.quirks		= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
1592 	.probe		= syskt_probe,
1593 	.probe_slot	= syskt_probe_slot,
1594 };
1595 
1596 static int via_probe(struct sdhci_pci_chip *chip)
1597 {
1598 	if (chip->pdev->revision == 0x10)
1599 		chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
1600 
1601 	return 0;
1602 }
1603 
1604 static const struct sdhci_pci_fixes sdhci_via = {
1605 	.probe		= via_probe,
1606 };
1607 
1608 static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
1609 {
1610 	slot->host->mmc->caps2 |= MMC_CAP2_HS200;
1611 	return 0;
1612 }
1613 
1614 static const struct sdhci_pci_fixes sdhci_rtsx = {
1615 	.quirks2	= SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1616 			SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
1617 			SDHCI_QUIRK2_BROKEN_DDR50,
1618 	.probe_slot	= rtsx_probe_slot,
1619 };
1620 
1621 /* AMD chipset generation */
1622 enum amd_chipset_gen {
1623 	AMD_CHIPSET_BEFORE_ML,
1624 	AMD_CHIPSET_CZ,
1625 	AMD_CHIPSET_NL,
1626 	AMD_CHIPSET_UNKNOWN,
1627 };
1628 
1629 /* AMD registers */
1630 #define AMD_SD_AUTO_PATTERN		0xB8
1631 #define AMD_MSLEEP_DURATION		4
1632 #define AMD_SD_MISC_CONTROL		0xD0
1633 #define AMD_MAX_TUNE_VALUE		0x0B
1634 #define AMD_AUTO_TUNE_SEL		0x10800
1635 #define AMD_FIFO_PTR			0x30
1636 #define AMD_BIT_MASK			0x1F
1637 
1638 static void amd_tuning_reset(struct sdhci_host *host)
1639 {
1640 	unsigned int val;
1641 
1642 	val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1643 	val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
1644 	sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1645 
1646 	val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1647 	val &= ~SDHCI_CTRL_EXEC_TUNING;
1648 	sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1649 }
1650 
1651 static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
1652 {
1653 	unsigned int val;
1654 
1655 	pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
1656 	val &= ~AMD_BIT_MASK;
1657 	val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
1658 	pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
1659 }
1660 
1661 static void amd_enable_manual_tuning(struct pci_dev *pdev)
1662 {
1663 	unsigned int val;
1664 
1665 	pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
1666 	val |= AMD_FIFO_PTR;
1667 	pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
1668 }
1669 
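/*
 * Manual HS200 tuning: try all 12 phase settings, track the longest run of
 * passing phases, then select the midpoint of that window.
 */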
1670 static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
1671 {
1672 	struct sdhci_pci_slot *slot = sdhci_priv(host);
1673 	struct pci_dev *pdev = slot->chip->pdev;
1674 	u8 valid_win = 0;
1675 	u8 valid_win_max = 0;
1676 	u8 valid_win_end = 0;
1677 	u8 ctrl, tune_around;
1678 
1679 	amd_tuning_reset(host);
1680 
1681 	for (tune_around = 0; tune_around < 12; tune_around++) {
1682 		amd_config_tuning_phase(pdev, tune_around);
1683 
1684 		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1685 			valid_win = 0;
1686 			msleep(AMD_MSLEEP_DURATION);
1687 			ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
1688 			sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
1689 		} else if (++valid_win > valid_win_max) {
1690 			valid_win_max = valid_win;
1691 			valid_win_end = tune_around;
1692 		}
1693 	}
1694 
1695 	if (!valid_win_max) {
1696 		dev_err(&pdev->dev, "no tuning point found\n");
1697 		return -EIO;
1698 	}
1699 
1700 	amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);
1701 
1702 	amd_enable_manual_tuning(pdev);
1703 
1704 	host->mmc->retune_period = 0;
1705 
1706 	return 0;
1707 }
1708 
1709 static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
1710 {
1711 	struct sdhci_host *host = mmc_priv(mmc);
1712 
1713 	/* AMD requires custom HS200 tuning */
1714 	if (host->timing == MMC_TIMING_MMC_HS200)
1715 		return amd_execute_tuning_hs200(host, opcode);
1716 
1717 	/* Otherwise perform standard SDHCI tuning */
1718 	return sdhci_execute_tuning(mmc, opcode);
1719 }
1720 
1721 static int amd_probe_slot(struct sdhci_pci_slot *slot)
1722 {
1723 	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
1724 
1725 	ops->execute_tuning = amd_execute_tuning;
1726 
1727 	return 0;
1728 }
1729 
1730 static int amd_probe(struct sdhci_pci_chip *chip)
1731 {
1732 	struct pci_dev	*smbus_dev;
1733 	enum amd_chipset_gen gen;
1734 
1735 	smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1736 			PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
1737 	if (smbus_dev) {
1738 		gen = AMD_CHIPSET_BEFORE_ML;
1739 	} else {
1740 		smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1741 				PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
1742 		if (smbus_dev) {
1743 			if (smbus_dev->revision < 0x51)
1744 				gen = AMD_CHIPSET_CZ;
1745 			else
1746 				gen = AMD_CHIPSET_NL;
1747 		} else {
1748 			gen = AMD_CHIPSET_UNKNOWN;
1749 		}
1750 	}
1751 
1752 	pci_dev_put(smbus_dev);
1753 
1754 	if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
1755 		chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
1756 
1757 	return 0;
1758 }
1759 
1760 static u32 sdhci_read_present_state(struct sdhci_host *host)
1761 {
1762 	return sdhci_readl(host, SDHCI_PRESENT_STATE);
1763 }
1764 
1765 static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
1766 {
1767 	struct sdhci_pci_slot *slot = sdhci_priv(host);
1768 	struct pci_dev *pdev = slot->chip->pdev;
1769 	u32 present_state;
1770 
1771 	/*
1772 	 * SDHC 0x7906 requires a hard reset to clear all internal state.
1773 	 * Otherwise it can get into a bad state where the DATA lines are always
1774 	 * read as zeros.
1775 	 */
1776 	if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
1777 		pci_clear_master(pdev);
1778 
1779 		pci_save_state(pdev);
1780 
1781 		pci_set_power_state(pdev, PCI_D3cold);
1782 		pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
1783 			pdev->current_state);
1784 		pci_set_power_state(pdev, PCI_D0);
1785 
1786 		pci_restore_state(pdev);
1787 
1788 		/*
1789 		 * SDHCI_RESET_ALL says the card detect logic should not be
1790 		 * reset, but since we need to reset the entire controller
1791 		 * we should wait until the card detect logic has stabilized.
1792 		 *
1793 		 * This normally takes about 40ms.
1794 		 */
1795 		readx_poll_timeout(
1796 			sdhci_read_present_state,
1797 			host,
1798 			present_state,
1799 			present_state & SDHCI_CD_STABLE,
1800 			10000,
1801 			100000
1802 		);
1803 	}
1804 
1805 	return sdhci_reset(host, mask);
1806 }
1807 
1808 static const struct sdhci_ops amd_sdhci_pci_ops = {
1809 	.set_clock			= sdhci_set_clock,
1810 	.enable_dma			= sdhci_pci_enable_dma,
1811 	.set_bus_width			= sdhci_set_bus_width,
1812 	.reset				= amd_sdhci_reset,
1813 	.set_uhs_signaling		= sdhci_set_uhs_signaling,
1814 };
1815 
1816 static const struct sdhci_pci_fixes sdhci_amd = {
1817 	.probe		= amd_probe,
1818 	.ops		= &amd_sdhci_pci_ops,
1819 	.probe_slot	= amd_probe_slot,
1820 };
1821 
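/*
 * PCI ID table: each entry binds a controller to the sdhci_pci_fixes that
 * describe its quirks and callbacks.  The trailing class-based entry
 * catches any otherwise unlisted standard SD host controller.
 */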
1822 static const struct pci_device_id pci_ids[] = {
1823 	SDHCI_PCI_DEVICE(RICOH, R5C822,  ricoh),
1824 	SDHCI_PCI_DEVICE(RICOH, R5C843,  ricoh_mmc),
1825 	SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
1826 	SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
1827 	SDHCI_PCI_DEVICE(ENE, CB712_SD,   ene_712),
1828 	SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
1829 	SDHCI_PCI_DEVICE(ENE, CB714_SD,   ene_714),
1830 	SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
1831 	SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
1832 	SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD,  jmicron),
1833 	SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
1834 	SDHCI_PCI_DEVICE(JMICRON, JMB388_SD,  jmicron),
1835 	SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
1836 	SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
1837 	SDHCI_PCI_DEVICE(VIA, 95D0, via),
1838 	SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
1839 	SDHCI_PCI_DEVICE(INTEL, QRK_SD,    intel_qrk),
1840 	SDHCI_PCI_DEVICE(INTEL, MRST_SD0,  intel_mrst_hc0),
1841 	SDHCI_PCI_DEVICE(INTEL, MRST_SD1,  intel_mrst_hc1_hc2),
1842 	SDHCI_PCI_DEVICE(INTEL, MRST_SD2,  intel_mrst_hc1_hc2),
1843 	SDHCI_PCI_DEVICE(INTEL, MFD_SD,    intel_mfd_sd),
1844 	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
1845 	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
1846 	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
1847 	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
1848 	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
1849 	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
1850 	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC,  intel_byt_emmc),
1851 	SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
1852 	SDHCI_PCI_DEVICE(INTEL, BYT_SDIO,  intel_byt_sdio),
1853 	SDHCI_PCI_DEVICE(INTEL, BYT_SD,    intel_byt_sd),
1854 	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
1855 	SDHCI_PCI_DEVICE(INTEL, BSW_EMMC,  intel_byt_emmc),
1856 	SDHCI_PCI_DEVICE(INTEL, BSW_SDIO,  intel_byt_sdio),
1857 	SDHCI_PCI_DEVICE(INTEL, BSW_SD,    intel_byt_sd),
1858 	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
1859 	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
1860 	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
1861 	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
1862 	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
1863 	SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
1864 	SDHCI_PCI_DEVICE(INTEL, SPT_EMMC,  intel_byt_emmc),
1865 	SDHCI_PCI_DEVICE(INTEL, SPT_SDIO,  intel_byt_sdio),
1866 	SDHCI_PCI_DEVICE(INTEL, SPT_SD,    intel_byt_sd),
1867 	SDHCI_PCI_DEVICE(INTEL, DNV_EMMC,  intel_byt_emmc),
1868 	SDHCI_PCI_DEVICE(INTEL, CDF_EMMC,  intel_glk_emmc),
1869 	SDHCI_PCI_DEVICE(INTEL, BXT_EMMC,  intel_byt_emmc),
1870 	SDHCI_PCI_DEVICE(INTEL, BXT_SDIO,  intel_byt_sdio),
1871 	SDHCI_PCI_DEVICE(INTEL, BXT_SD,    intel_byt_sd),
1872 	SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
1873 	SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
1874 	SDHCI_PCI_DEVICE(INTEL, BXTM_SD,   intel_byt_sd),
1875 	SDHCI_PCI_DEVICE(INTEL, APL_EMMC,  intel_byt_emmc),
1876 	SDHCI_PCI_DEVICE(INTEL, APL_SDIO,  intel_byt_sdio),
1877 	SDHCI_PCI_DEVICE(INTEL, APL_SD,    intel_byt_sd),
1878 	SDHCI_PCI_DEVICE(INTEL, GLK_EMMC,  intel_glk_emmc),
1879 	SDHCI_PCI_DEVICE(INTEL, GLK_SDIO,  intel_byt_sdio),
1880 	SDHCI_PCI_DEVICE(INTEL, GLK_SD,    intel_byt_sd),
1881 	SDHCI_PCI_DEVICE(INTEL, CNP_EMMC,  intel_glk_emmc),
1882 	SDHCI_PCI_DEVICE(INTEL, CNP_SD,    intel_byt_sd),
1883 	SDHCI_PCI_DEVICE(INTEL, CNPH_SD,   intel_byt_sd),
1884 	SDHCI_PCI_DEVICE(INTEL, ICP_EMMC,  intel_glk_emmc),
1885 	SDHCI_PCI_DEVICE(INTEL, ICP_SD,    intel_byt_sd),
1886 	SDHCI_PCI_DEVICE(INTEL, EHL_EMMC,  intel_glk_emmc),
1887 	SDHCI_PCI_DEVICE(INTEL, EHL_SD,    intel_byt_sd),
1888 	SDHCI_PCI_DEVICE(INTEL, CML_EMMC,  intel_glk_emmc),
1889 	SDHCI_PCI_DEVICE(INTEL, CML_SD,    intel_byt_sd),
1890 	SDHCI_PCI_DEVICE(INTEL, CMLH_SD,   intel_byt_sd),
1891 	SDHCI_PCI_DEVICE(INTEL, JSL_EMMC,  intel_glk_emmc),
1892 	SDHCI_PCI_DEVICE(INTEL, JSL_SD,    intel_byt_sd),
1893 	SDHCI_PCI_DEVICE(INTEL, LKF_EMMC,  intel_glk_emmc),
1894 	SDHCI_PCI_DEVICE(INTEL, LKF_SD,    intel_byt_sd),
1895 	SDHCI_PCI_DEVICE(INTEL, ADL_EMMC,  intel_glk_emmc),
1896 	SDHCI_PCI_DEVICE(O2, 8120,     o2),
1897 	SDHCI_PCI_DEVICE(O2, 8220,     o2),
1898 	SDHCI_PCI_DEVICE(O2, 8221,     o2),
1899 	SDHCI_PCI_DEVICE(O2, 8320,     o2),
1900 	SDHCI_PCI_DEVICE(O2, 8321,     o2),
1901 	SDHCI_PCI_DEVICE(O2, FUJIN2,   o2),
1902 	SDHCI_PCI_DEVICE(O2, SDS0,     o2),
1903 	SDHCI_PCI_DEVICE(O2, SDS1,     o2),
1904 	SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
1905 	SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
1906 	SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
1907 	SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
1908 	SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
1909 	SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
1910 	SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
1911 	SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
1912 	/* Generic SD host controller */
1913 	{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
1914 	{ /* end: all zeroes */ },
1915 };
1916 
1917 MODULE_DEVICE_TABLE(pci, pci_ids);
1918 
1919 /*****************************************************************************\
1920  *                                                                           *
1921  * SDHCI core callbacks                                                      *
1922  *                                                                           *
1923 \*****************************************************************************/
1924 
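/*
 * Core enable_dma callback: warn when the programming-interface byte does
 * not advertise DMA even though SDMA will be used, then enable PCI bus
 * mastering for the function.
 */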
1925 int sdhci_pci_enable_dma(struct sdhci_host *host)
1926 {
1927 	struct sdhci_pci_slot *slot;
1928 	struct pci_dev *pdev;
1929 
1930 	slot = sdhci_priv(host);
1931 	pdev = slot->chip->pdev;
1932 
1933 	if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
1934 		((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
1935 		(host->flags & SDHCI_USE_SDMA)) {
1936 		dev_warn(&pdev->dev, "Will use DMA mode even though HW "
1937 			"doesn't fully claim to support it.\n");
1938 	}
1939 
1940 	pci_set_master(pdev);
1941 
1942 	return 0;
1943 }
1944 
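/* Dispatch ->hw_reset to the slot-specific handler installed by a fixup, if any. */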
1945 static void sdhci_pci_hw_reset(struct sdhci_host *host)
1946 {
1947 	struct sdhci_pci_slot *slot = sdhci_priv(host);
1948 
1949 	if (slot->hw_reset)
1950 		slot->hw_reset(host);
1951 }
1952 
1953 static const struct sdhci_ops sdhci_pci_ops = {
1954 	.set_clock	= sdhci_set_clock,
1955 	.enable_dma	= sdhci_pci_enable_dma,
1956 	.set_bus_width	= sdhci_set_bus_width,
1957 	.reset		= sdhci_reset,
1958 	.set_uhs_signaling = sdhci_set_uhs_signaling,
1959 	.hw_reset		= sdhci_pci_hw_reset,
1960 };
1961 
1962 /*****************************************************************************\
1963  *                                                                           *
1964  * Suspend/resume                                                            *
1965  *                                                                           *
1966 \*****************************************************************************/
1967 
1968 #ifdef CONFIG_PM_SLEEP
1969 static int sdhci_pci_suspend(struct device *dev)
1970 {
1971 	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
1972 
1973 	if (!chip)
1974 		return 0;
1975 
1976 	if (chip->fixes && chip->fixes->suspend)
1977 		return chip->fixes->suspend(chip);
1978 
1979 	return sdhci_pci_suspend_host(chip);
1980 }
1981 
1982 static int sdhci_pci_resume(struct device *dev)
1983 {
1984 	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
1985 
1986 	if (!chip)
1987 		return 0;
1988 
1989 	if (chip->fixes && chip->fixes->resume)
1990 		return chip->fixes->resume(chip);
1991 
1992 	return sdhci_pci_resume_host(chip);
1993 }
1994 #endif
1995 
1996 #ifdef CONFIG_PM
1997 static int sdhci_pci_runtime_suspend(struct device *dev)
1998 {
1999 	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2000 
2001 	if (!chip)
2002 		return 0;
2003 
2004 	if (chip->fixes && chip->fixes->runtime_suspend)
2005 		return chip->fixes->runtime_suspend(chip);
2006 
2007 	return sdhci_pci_runtime_suspend_host(chip);
2008 }
2009 
2010 static int sdhci_pci_runtime_resume(struct device *dev)
2011 {
2012 	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2013 
2014 	if (!chip)
2015 		return 0;
2016 
2017 	if (chip->fixes && chip->fixes->runtime_resume)
2018 		return chip->fixes->runtime_resume(chip);
2019 
2020 	return sdhci_pci_runtime_resume_host(chip);
2021 }
2022 #endif
2023 
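/*
 * System sleep and runtime PM both defer to the chip fixes when a handler
 * is provided, and otherwise fall back to the generic host helpers.
 */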
2024 static const struct dev_pm_ops sdhci_pci_pm_ops = {
2025 	SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
2026 	SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
2027 			sdhci_pci_runtime_resume, NULL)
2028 };
2029 
2030 /*****************************************************************************\
2031  *                                                                           *
2032  * Device probing/removal                                                    *
2033  *                                                                           *
2034 \*****************************************************************************/
2035 
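/*
 * Probe one slot: validate the slot's BAR and programming interface,
 * allocate an sdhci_host with room for the chip's private data, map the
 * registers, run any per-slot fixup, optionally wire up a card-detect
 * GPIO, and finally register the host with the MMC core.
 */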
2036 static struct sdhci_pci_slot *sdhci_pci_probe_slot(
2037 	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
2038 	int slotno)
2039 {
2040 	struct sdhci_pci_slot *slot;
2041 	struct sdhci_host *host;
2042 	int ret, bar = first_bar + slotno;
2043 	size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;
2044 
2045 	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
2046 		dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
2047 		return ERR_PTR(-ENODEV);
2048 	}
2049 
2050 	if (pci_resource_len(pdev, bar) < 0x100) {
2051 		dev_err(&pdev->dev, "Invalid iomem size. You may "
2052 			"experience problems.\n");
2053 	}
2054 
2055 	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
2056 		dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
2057 		return ERR_PTR(-ENODEV);
2058 	}
2059 
2060 	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
2061 		dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
2062 		return ERR_PTR(-ENODEV);
2063 	}
2064 
2065 	host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
2066 	if (IS_ERR(host)) {
2067 		dev_err(&pdev->dev, "cannot allocate host\n");
2068 		return ERR_CAST(host);
2069 	}
2070 
2071 	slot = sdhci_priv(host);
2072 
2073 	slot->chip = chip;
2074 	slot->host = host;
2075 	slot->cd_idx = -1;
2076 
2077 	host->hw_name = "PCI";
2078 	host->ops = chip->fixes && chip->fixes->ops ?
2079 		    chip->fixes->ops :
2080 		    &sdhci_pci_ops;
2081 	host->quirks = chip->quirks;
2082 	host->quirks2 = chip->quirks2;
2083 
2084 	host->irq = pdev->irq;
2085 
2086 	ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
2087 	if (ret) {
2088 		dev_err(&pdev->dev, "cannot request region\n");
2089 		goto cleanup;
2090 	}
2091 
2092 	host->ioaddr = pcim_iomap_table(pdev)[bar];
2093 
2094 	if (chip->fixes && chip->fixes->probe_slot) {
2095 		ret = chip->fixes->probe_slot(slot);
2096 		if (ret)
2097 			goto cleanup;
2098 	}
2099 
2100 	host->mmc->pm_caps = MMC_PM_KEEP_POWER;
2101 	host->mmc->slotno = slotno;
2102 	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
2103 
2104 	if (device_can_wakeup(&pdev->dev))
2105 		host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2106 
2107 	if (host->mmc->caps & MMC_CAP_CD_WAKE)
2108 		device_init_wakeup(&pdev->dev, true);
2109 
2110 	if (slot->cd_idx >= 0) {
2111 		ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
2112 					   slot->cd_override_level, 0);
2113 		if (ret && ret != -EPROBE_DEFER)
2114 			ret = mmc_gpiod_request_cd(host->mmc, NULL,
2115 						   slot->cd_idx,
2116 						   slot->cd_override_level,
2117 						   0);
2118 		if (ret == -EPROBE_DEFER)
2119 			goto remove;
2120 
2121 		if (ret) {
2122 			dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
2123 			slot->cd_idx = -1;
2124 		}
2125 	}
2126 
2127 	if (chip->fixes && chip->fixes->add_host)
2128 		ret = chip->fixes->add_host(slot);
2129 	else
2130 		ret = sdhci_add_host(host);
2131 	if (ret)
2132 		goto remove;
2133 
2134 	/*
2135 	 * Check if the chip needs a separate GPIO for card detect to wake up
2136 	 * from runtime suspend.  If it is not there, don't allow runtime PM.
2137 	 */
2138 	if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
2139 		chip->allow_runtime_pm = false;
2140 
2141 	return slot;
2142 
2143 remove:
2144 	if (chip->fixes && chip->fixes->remove_slot)
2145 		chip->fixes->remove_slot(slot, 0);
2146 
2147 cleanup:
2148 	sdhci_free_host(host);
2149 
2150 	return ERR_PTR(ret);
2151 }
2152 
2153 static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
2154 {
2155 	int dead;
2156 	u32 scratch;
2157 
2158 	dead = 0;
2159 	scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
2160 	if (scratch == (u32)-1)
2161 		dead = 1;
2162 
2163 	sdhci_remove_host(slot->host, dead);
2164 
2165 	if (slot->chip->fixes && slot->chip->fixes->remove_slot)
2166 		slot->chip->fixes->remove_slot(slot, dead);
2167 
2168 	sdhci_free_host(slot->host);
2169 }
2170 
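/*
 * Opt the device in to runtime PM with a 50 ms autosuspend delay;
 * sdhci_pci_runtime_pm_forbid() reverses this on removal.
 */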
2171 static void sdhci_pci_runtime_pm_allow(struct device *dev)
2172 {
2173 	pm_suspend_ignore_children(dev, 1);
2174 	pm_runtime_set_autosuspend_delay(dev, 50);
2175 	pm_runtime_use_autosuspend(dev);
2176 	pm_runtime_allow(dev);
2177 	/* Stay active until mmc core scans for a card */
2178 	pm_runtime_put_noidle(dev);
2179 }
2180 
2181 static void sdhci_pci_runtime_pm_forbid(struct device *dev)
2182 {
2183 	pm_runtime_forbid(dev);
2184 	pm_runtime_get_noresume(dev);
2185 }
2186 
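/*
 * Main probe: read the slot count and first BAR index from the standard
 * PCI_SLOT_INFO config register, enable the device, apply any
 * chip-specific fixes, then probe each slot in turn, unwinding the
 * already-probed slots if one of them fails.
 */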
2187 static int sdhci_pci_probe(struct pci_dev *pdev,
2188 				     const struct pci_device_id *ent)
2189 {
2190 	struct sdhci_pci_chip *chip;
2191 	struct sdhci_pci_slot *slot;
2192 
2193 	u8 slots, first_bar;
2194 	int ret, i;
2195 
2196 	BUG_ON(pdev == NULL);
2197 	BUG_ON(ent == NULL);
2198 
2199 	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
2200 		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
2201 
2202 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
2203 	if (ret)
2204 		return ret;
2205 
2206 	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
2207 	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
2208 
2209 	BUG_ON(slots > MAX_SLOTS);
2210 
2211 	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
2212 	if (ret)
2213 		return ret;
2214 
2215 	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
2216 
2217 	if (first_bar > 5) {
2218 		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
2219 		return -ENODEV;
2220 	}
2221 
2222 	ret = pcim_enable_device(pdev);
2223 	if (ret)
2224 		return ret;
2225 
2226 	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
2227 	if (!chip)
2228 		return -ENOMEM;
2229 
2230 	chip->pdev = pdev;
2231 	chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
2232 	if (chip->fixes) {
2233 		chip->quirks = chip->fixes->quirks;
2234 		chip->quirks2 = chip->fixes->quirks2;
2235 		chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
2236 	}
2237 	chip->num_slots = slots;
2238 	chip->pm_retune = true;
2239 	chip->rpm_retune = true;
2240 
2241 	pci_set_drvdata(pdev, chip);
2242 
2243 	if (chip->fixes && chip->fixes->probe) {
2244 		ret = chip->fixes->probe(chip);
2245 		if (ret)
2246 			return ret;
2247 	}
2248 
2249 	slots = chip->num_slots;	/* Quirk may have changed this */
2250 
2251 	for (i = 0; i < slots; i++) {
2252 		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
2253 		if (IS_ERR(slot)) {
2254 			for (i--; i >= 0; i--)
2255 				sdhci_pci_remove_slot(chip->slots[i]);
2256 			return PTR_ERR(slot);
2257 		}
2258 
2259 		chip->slots[i] = slot;
2260 	}
2261 
2262 	if (chip->allow_runtime_pm)
2263 		sdhci_pci_runtime_pm_allow(&pdev->dev);
2264 
2265 	return 0;
2266 }
2267 
2268 static void sdhci_pci_remove(struct pci_dev *pdev)
2269 {
2270 	int i;
2271 	struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
2272 
2273 	if (chip->allow_runtime_pm)
2274 		sdhci_pci_runtime_pm_forbid(&pdev->dev);
2275 
2276 	for (i = 0; i < chip->num_slots; i++)
2277 		sdhci_pci_remove_slot(chip->slots[i]);
2278 }
2279 
2280 static struct pci_driver sdhci_driver = {
2281 	.name =		"sdhci-pci",
2282 	.id_table =	pci_ids,
2283 	.probe =	sdhci_pci_probe,
2284 	.remove =	sdhci_pci_remove,
2285 	.driver =	{
2286 		.pm =   &sdhci_pci_pm_ops
2287 	},
2288 };
2289 
2290 module_pci_driver(sdhci_driver);
2291 
2292 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
2293 MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
2294 MODULE_LICENSE("GPL");
2295