1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
3 *
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 *
6 * Thanks to the following companies for their support:
7 *
8 * - JMicron (hardware and technical support)
9 */
10
11 #include <linux/bitfield.h>
12 #include <linux/string.h>
13 #include <linux/delay.h>
14 #include <linux/highmem.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/scatterlist.h>
21 #include <linux/io.h>
22 #include <linux/iopoll.h>
23 #include <linux/gpio.h>
24 #include <linux/gpio/machine.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm_qos.h>
27 #include <linux/debugfs.h>
28 #include <linux/acpi.h>
29 #include <linux/dmi.h>
30
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/mmc.h>
33 #include <linux/mmc/slot-gpio.h>
34
35 #ifdef CONFIG_X86
36 #include <asm/iosf_mbi.h>
37 #endif
38
39 #include "cqhci.h"
40
41 #include "sdhci.h"
42 #include "sdhci-cqhci.h"
43 #include "sdhci-pci.h"
44
45 static void sdhci_pci_hw_reset(struct sdhci_host *host);
46
47 #ifdef CONFIG_PM_SLEEP
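/*
 * Enable device wakeup when the combined slot PM flags request keeping power
 * and waking on SDIO IRQ; otherwise disable it, unless a slot relies on
 * card-detect wakeup (MMC_CAP_CD_WAKE).
 */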
48 static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
49 {
50 mmc_pm_flag_t pm_flags = 0;
51 bool cap_cd_wake = false;
52 int i;
53
54 for (i = 0; i < chip->num_slots; i++) {
55 struct sdhci_pci_slot *slot = chip->slots[i];
56
57 if (slot) {
58 pm_flags |= slot->host->mmc->pm_flags;
59 if (slot->host->mmc->caps & MMC_CAP_CD_WAKE)
60 cap_cd_wake = true;
61 }
62 }
63
64 if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
65 return device_wakeup_enable(&chip->pdev->dev);
66 else if (!cap_cd_wake)
67 device_wakeup_disable(&chip->pdev->dev);
68
69 return 0;
70 }
71
72 static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
73 {
74 int i, ret;
75
76 sdhci_pci_init_wakeup(chip);
77
78 for (i = 0; i < chip->num_slots; i++) {
79 struct sdhci_pci_slot *slot = chip->slots[i];
80 struct sdhci_host *host;
81
82 if (!slot)
83 continue;
84
85 host = slot->host;
86
87 if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3)
88 mmc_retune_needed(host->mmc);
89
90 ret = sdhci_suspend_host(host);
91 if (ret)
92 goto err_pci_suspend;
93
94 if (device_may_wakeup(&chip->pdev->dev))
95 mmc_gpio_set_cd_wake(host->mmc, true);
96 }
97
98 return 0;
99
100 err_pci_suspend:
101 while (--i >= 0)
102 sdhci_resume_host(chip->slots[i]->host);
103 return ret;
104 }
105
106 int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
107 {
108 struct sdhci_pci_slot *slot;
109 int i, ret;
110
111 for (i = 0; i < chip->num_slots; i++) {
112 slot = chip->slots[i];
113 if (!slot)
114 continue;
115
116 ret = sdhci_resume_host(slot->host);
117 if (ret)
118 return ret;
119
120 mmc_gpio_set_cd_wake(slot->host->mmc, false);
121 }
122
123 return 0;
124 }
125
126 static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
127 {
128 int ret;
129
130 ret = cqhci_suspend(chip->slots[0]->host->mmc);
131 if (ret)
132 return ret;
133
134 return sdhci_pci_suspend_host(chip);
135 }
136
137 static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
138 {
139 int ret;
140
141 ret = sdhci_pci_resume_host(chip);
142 if (ret)
143 return ret;
144
145 return cqhci_resume(chip->slots[0]->host->mmc);
146 }
147 #endif
148
149 #ifdef CONFIG_PM
150 static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
151 {
152 struct sdhci_pci_slot *slot;
153 struct sdhci_host *host;
154 int i, ret;
155
156 for (i = 0; i < chip->num_slots; i++) {
157 slot = chip->slots[i];
158 if (!slot)
159 continue;
160
161 host = slot->host;
162
163 ret = sdhci_runtime_suspend_host(host);
164 if (ret)
165 goto err_pci_runtime_suspend;
166
167 if (chip->rpm_retune &&
168 host->tuning_mode != SDHCI_TUNING_MODE_3)
169 mmc_retune_needed(host->mmc);
170 }
171
172 return 0;
173
174 err_pci_runtime_suspend:
175 while (--i >= 0)
176 sdhci_runtime_resume_host(chip->slots[i]->host, 0);
177 return ret;
178 }
179
180 static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
181 {
182 struct sdhci_pci_slot *slot;
183 int i, ret;
184
185 for (i = 0; i < chip->num_slots; i++) {
186 slot = chip->slots[i];
187 if (!slot)
188 continue;
189
190 ret = sdhci_runtime_resume_host(slot->host, 0);
191 if (ret)
192 return ret;
193 }
194
195 return 0;
196 }
197
198 static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
199 {
200 int ret;
201
202 ret = cqhci_suspend(chip->slots[0]->host->mmc);
203 if (ret)
204 return ret;
205
206 return sdhci_pci_runtime_suspend_host(chip);
207 }
208
209 static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
210 {
211 int ret;
212
213 ret = sdhci_pci_runtime_resume_host(chip);
214 if (ret)
215 return ret;
216
217 return cqhci_resume(chip->slots[0]->host->mmc);
218 }
219 #endif
220
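/* Hand CQE interrupts to CQHCI; everything else is left for SDHCI to handle */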
221 static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
222 {
223 int cmd_error = 0;
224 int data_error = 0;
225
226 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
227 return intmask;
228
229 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
230
231 return 0;
232 }
233
234 static void sdhci_pci_dumpregs(struct mmc_host *mmc)
235 {
236 sdhci_dumpregs(mmc_priv(mmc));
237 }
238
239 /*****************************************************************************\
240 * *
241 * Hardware specific quirk handling *
242 * *
243 \*****************************************************************************/
244
245 static int ricoh_probe(struct sdhci_pci_chip *chip)
246 {
247 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
248 chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
249 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
250 return 0;
251 }
252
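/*
 * Override the capability registers with a fixed, known-good set (SDMA,
 * high speed, 3.3V, timeout/base clock values) via __sdhci_read_caps().
 */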
253 static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
254 {
255 u32 caps =
256 FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
257 FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
258 SDHCI_TIMEOUT_CLK_UNIT |
259 SDHCI_CAN_VDD_330 |
260 SDHCI_CAN_DO_HISPD |
261 SDHCI_CAN_DO_SDMA;
262 u32 caps1 = 0;
263
264 __sdhci_read_caps(slot->host, NULL, &caps, &caps1);
265 return 0;
266 }
267
268 #ifdef CONFIG_PM_SLEEP
269 static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
270 {
271 /* Apply a delay to allow the controller to settle; otherwise it
272  * becomes confused if the card state changed during suspend.
273  */
274 msleep(500);
275 return sdhci_pci_resume_host(chip);
276 }
277 #endif
278
279 static const struct sdhci_pci_fixes sdhci_ricoh = {
280 .probe = ricoh_probe,
281 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
282 SDHCI_QUIRK_FORCE_DMA |
283 SDHCI_QUIRK_CLOCK_BEFORE_RESET,
284 };
285
286 static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
287 .probe_slot = ricoh_mmc_probe_slot,
288 #ifdef CONFIG_PM_SLEEP
289 .resume = ricoh_mmc_resume,
290 #endif
291 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
292 SDHCI_QUIRK_CLOCK_BEFORE_RESET |
293 SDHCI_QUIRK_NO_CARD_NO_RESET,
294 };
295
296 static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
297 {
298 struct sdhci_host *host = mmc_priv(mmc);
299
300 sdhci_set_ios(mmc, ios);
301
302 /*
303 * Some (ENE) controllers misbehave on some ios operations,
304 * signalling timeout and CRC errors even on CMD0. Resetting
305 * it on each ios seems to solve the problem.
306 */
307 if (!(host->flags & SDHCI_DEVICE_DEAD))
308 sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
309 }
310
311 static int ene_714_probe_slot(struct sdhci_pci_slot *slot)
312 {
313 slot->host->mmc_host_ops.set_ios = ene_714_set_ios;
314 return 0;
315 }
316
317 static const struct sdhci_pci_fixes sdhci_ene_712 = {
318 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
319 SDHCI_QUIRK_BROKEN_DMA,
320 };
321
322 static const struct sdhci_pci_fixes sdhci_ene_714 = {
323 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
324 SDHCI_QUIRK_BROKEN_DMA,
325 .probe_slot = ene_714_probe_slot,
326 };
327
328 static const struct sdhci_pci_fixes sdhci_cafe = {
329 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
330 SDHCI_QUIRK_NO_BUSY_IRQ |
331 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
332 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
333 };
334
335 static const struct sdhci_pci_fixes sdhci_intel_qrk = {
336 .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
337 };
338
339 static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
340 {
341 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
342 return 0;
343 }
344
345 /*
346 * ADMA operation is disabled for Moorestown platform due to
347 * hardware bugs.
348 */
349 static int mrst_hc_probe(struct sdhci_pci_chip *chip)
350 {
351 /*
352 * The number of slots is fixed here for MRST, as SDIO3/5 are never
353 * used and have hardware bugs.
354 */
355 chip->num_slots = 1;
356 return 0;
357 }
358
359 static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
360 {
361 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
362 return 0;
363 }
364
365 static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
366 {
367 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
368 slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
369 return 0;
370 }
371
372 static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
373 {
374 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
375 return 0;
376 }
377
378 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
379 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
380 .probe_slot = mrst_hc_probe_slot,
381 };
382
383 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
384 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
385 .probe = mrst_hc_probe,
386 };
387
388 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
389 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
390 .allow_runtime_pm = true,
391 .own_cd_for_runtime_pm = true,
392 };
393
394 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
395 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
396 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
397 .allow_runtime_pm = true,
398 .probe_slot = mfd_sdio_probe_slot,
399 };
400
401 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
402 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
403 .allow_runtime_pm = true,
404 .probe_slot = mfd_emmc_probe_slot,
405 };
406
407 static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
408 .quirks = SDHCI_QUIRK_BROKEN_ADMA,
409 .probe_slot = pch_hc_probe_slot,
410 };
411
412 #ifdef CONFIG_X86
413
414 #define BYT_IOSF_SCCEP 0x63
415 #define BYT_IOSF_OCP_NETCTRL0 0x1078
416 #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
417
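/*
 * For Bay Trail eMMC/SDIO/SD controllers, clear the OCP timeout base field
 * in the IOSF sideband NETCTRL0 register if it is set.
 */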
418 static void byt_ocp_setting(struct pci_dev *pdev)
419 {
420 u32 val = 0;
421
422 if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
423 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
424 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
425 pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
426 return;
427
428 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
429 &val)) {
430 dev_err(&pdev->dev, "%s read error\n", __func__);
431 return;
432 }
433
434 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
435 return;
436
437 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
438
439 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
440 val)) {
441 dev_err(&pdev->dev, "%s write error\n", __func__);
442 return;
443 }
444
445 dev_dbg(&pdev->dev, "%s completed\n", __func__);
446 }
447
448 #else
449
450 static inline void byt_ocp_setting(struct pci_dev *pdev)
451 {
452 }
453
454 #endif
455
456 enum {
457 INTEL_DSM_FNS = 0,
458 INTEL_DSM_V18_SWITCH = 3,
459 INTEL_DSM_V33_SWITCH = 4,
460 INTEL_DSM_DRV_STRENGTH = 9,
461 INTEL_DSM_D3_RETUNE = 10,
462 };
463
464 struct intel_host {
465 u32 dsm_fns;
466 int drv_strength;
467 bool d3_retune;
468 bool rpm_retune_ok;
469 bool needs_pwr_off;
470 u32 glk_rx_ctrl1;
471 u32 glk_tun_val;
472 u32 active_ltr;
473 u32 idle_ltr;
474 };
475
476 static const guid_t intel_dsm_guid =
477 GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
478 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
479
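/* Evaluate an Intel _DSM function and copy up to 4 bytes of the result */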
480 static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
481 unsigned int fn, u32 *result)
482 {
483 union acpi_object *obj;
484 int err = 0;
485 size_t len;
486
487 obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL,
488 ACPI_TYPE_BUFFER);
489 if (!obj)
490 return -EOPNOTSUPP;
491
492 if (obj->buffer.length < 1) {
493 err = -EINVAL;
494 goto out;
495 }
496
497 len = min_t(size_t, obj->buffer.length, 4);
498
499 *result = 0;
500 memcpy(result, obj->buffer.pointer, len);
501 out:
502 ACPI_FREE(obj);
503
504 return err;
505 }
506
507 static int intel_dsm(struct intel_host *intel_host, struct device *dev,
508 unsigned int fn, u32 *result)
509 {
510 if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
511 return -EOPNOTSUPP;
512
513 return __intel_dsm(intel_host, dev, fn, result);
514 }
515
516 static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
517 struct mmc_host *mmc)
518 {
519 int err;
520 u32 val;
521
522 intel_host->d3_retune = true;
523
524 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
525 if (err) {
526 pr_debug("%s: DSM not supported, error %d\n",
527 mmc_hostname(mmc), err);
528 return;
529 }
530
531 pr_debug("%s: DSM function mask %#x\n",
532 mmc_hostname(mmc), intel_host->dsm_fns);
533
534 err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val);
535 intel_host->drv_strength = err ? 0 : val;
536
537 err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val);
538 intel_host->d3_retune = err ? true : !!val;
539 }
540
541 static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
542 {
543 u8 reg;
544
545 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
546 reg |= 0x10;
547 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
548 /* For eMMC, minimum is 1us but give it 9us for good measure */
549 udelay(9);
550 reg &= ~0x10;
551 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
552 /* For eMMC, minimum is 200us but give it 300us for good measure */
553 usleep_range(300, 1000);
554 }
555
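/* Return the DSM-provided drive strength if the card supports it, else 0 */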
556 static int intel_select_drive_strength(struct mmc_card *card,
557 unsigned int max_dtr, int host_drv,
558 int card_drv, int *drv_type)
559 {
560 struct sdhci_host *host = mmc_priv(card->host);
561 struct sdhci_pci_slot *slot = sdhci_priv(host);
562 struct intel_host *intel_host = sdhci_pci_priv(slot);
563
564 if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
565 return 0;
566
567 return intel_host->drv_strength;
568 }
569
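/* Trust the CD GPIO when it reports no card, otherwise query the controller */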
570 static int bxt_get_cd(struct mmc_host *mmc)
571 {
572 int gpio_cd = mmc_gpio_get_cd(mmc);
573
574 if (!gpio_cd)
575 return 0;
576
577 return sdhci_get_cd_nogpio(mmc);
578 }
579
580 static int mrfld_get_cd(struct mmc_host *mmc)
581 {
582 return sdhci_get_cd_nogpio(mmc);
583 }
584
585 #define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
586 #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
587
588 static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
589 unsigned short vdd)
590 {
591 struct sdhci_pci_slot *slot = sdhci_priv(host);
592 struct intel_host *intel_host = sdhci_pci_priv(slot);
593 int cntr;
594 u8 reg;
595
596 /*
597 * Bus power may control card power, but a full reset still may not
598 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
599 * That might be needed to initialize correctly, if the card was left
600 * powered on previously.
601 */
602 if (intel_host->needs_pwr_off) {
603 intel_host->needs_pwr_off = false;
604 if (mode != MMC_POWER_OFF) {
605 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
606 usleep_range(10000, 12500);
607 }
608 }
609
610 sdhci_set_power(host, mode, vdd);
611
612 if (mode == MMC_POWER_OFF) {
613 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
614 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD)
615 usleep_range(15000, 17500);
616 return;
617 }
618
619 /*
620 * Bus power might not enable after D3 -> D0 transition due to the
621 * present state not yet having propagated. Retry for up to 2ms.
622 */
623 for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
624 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
625 if (reg & SDHCI_POWER_ON)
626 break;
627 udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
628 reg |= SDHCI_POWER_ON;
629 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
630 }
631 }
632
633 static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
634 unsigned int timing)
635 {
636 /* Set UHS timing to SDR25 for High Speed mode */
637 if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
638 timing = MMC_TIMING_UHS_SDR25;
639 sdhci_set_uhs_signaling(host, timing);
640 }
641
642 #define INTEL_HS400_ES_REG 0x78
643 #define INTEL_HS400_ES_BIT BIT(0)
644
645 static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
646 struct mmc_ios *ios)
647 {
648 struct sdhci_host *host = mmc_priv(mmc);
649 u32 val;
650
651 val = sdhci_readl(host, INTEL_HS400_ES_REG);
652 if (ios->enhanced_strobe)
653 val |= INTEL_HS400_ES_BIT;
654 else
655 val &= ~INTEL_HS400_ES_BIT;
656 sdhci_writel(host, val, INTEL_HS400_ES_REG);
657 }
658
659 static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
660 struct mmc_ios *ios)
661 {
662 struct device *dev = mmc_dev(mmc);
663 struct sdhci_host *host = mmc_priv(mmc);
664 struct sdhci_pci_slot *slot = sdhci_priv(host);
665 struct intel_host *intel_host = sdhci_pci_priv(slot);
666 unsigned int fn;
667 u32 result = 0;
668 int err;
669
670 err = sdhci_start_signal_voltage_switch(mmc, ios);
671 if (err)
672 return err;
673
674 switch (ios->signal_voltage) {
675 case MMC_SIGNAL_VOLTAGE_330:
676 fn = INTEL_DSM_V33_SWITCH;
677 break;
678 case MMC_SIGNAL_VOLTAGE_180:
679 fn = INTEL_DSM_V18_SWITCH;
680 break;
681 default:
682 return 0;
683 }
684
685 err = intel_dsm(intel_host, dev, fn, &result);
686 pr_debug("%s: %s DSM fn %u error %d result %u\n",
687 mmc_hostname(mmc), __func__, fn, err, result);
688
689 return 0;
690 }
691
692 static const struct sdhci_ops sdhci_intel_byt_ops = {
693 .set_clock = sdhci_set_clock,
694 .set_power = sdhci_intel_set_power,
695 .enable_dma = sdhci_pci_enable_dma,
696 .set_bus_width = sdhci_set_bus_width,
697 .reset = sdhci_reset,
698 .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
699 .hw_reset = sdhci_pci_hw_reset,
700 };
701
702 static const struct sdhci_ops sdhci_intel_glk_ops = {
703 .set_clock = sdhci_set_clock,
704 .set_power = sdhci_intel_set_power,
705 .enable_dma = sdhci_pci_enable_dma,
706 .set_bus_width = sdhci_set_bus_width,
707 .reset = sdhci_and_cqhci_reset,
708 .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
709 .hw_reset = sdhci_pci_hw_reset,
710 .irq = sdhci_cqhci_irq,
711 };
712
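/* Initialise the DSM interface and honour its D3 re-tune requirement */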
713 static void byt_read_dsm(struct sdhci_pci_slot *slot)
714 {
715 struct intel_host *intel_host = sdhci_pci_priv(slot);
716 struct device *dev = &slot->chip->pdev->dev;
717 struct mmc_host *mmc = slot->host->mmc;
718
719 intel_dsm_init(intel_host, dev, mmc);
720 slot->chip->rpm_retune = intel_host->d3_retune;
721 }
722
723 static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
724 {
725 int err = sdhci_execute_tuning(mmc, opcode);
726 struct sdhci_host *host = mmc_priv(mmc);
727
728 if (err)
729 return err;
730
731 /*
732 * Tuning can leave the IP in an active state (Buffer Read Enable bit
733 * set) which prevents the entry to low power states (i.e. S0i3). Data
734 * reset will clear it.
735 */
736 sdhci_reset(host, SDHCI_RESET_DATA);
737
738 return 0;
739 }
740
741 #define INTEL_ACTIVELTR 0x804
742 #define INTEL_IDLELTR 0x808
743
744 #define INTEL_LTR_REQ BIT(15)
745 #define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
746 #define INTEL_LTR_SCALE_1US (2 << 10)
747 #define INTEL_LTR_SCALE_32US (3 << 10)
748 #define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
749
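/* Cache the current active/idle LTR register values in the host private data */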
750 static void intel_cache_ltr(struct sdhci_pci_slot *slot)
751 {
752 struct intel_host *intel_host = sdhci_pci_priv(slot);
753 struct sdhci_host *host = slot->host;
754
755 intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
756 intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
757 }
758
759 static void intel_ltr_set(struct device *dev, s32 val)
760 {
761 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
762 struct sdhci_pci_slot *slot = chip->slots[0];
763 struct intel_host *intel_host = sdhci_pci_priv(slot);
764 struct sdhci_host *host = slot->host;
765 u32 ltr;
766
767 pm_runtime_get_sync(dev);
768
769 /*
770 * Program the latency tolerance (LTR) according to what has been
771 * asked by the PM QoS layer, or disable it if we were passed a
772 * negative value or PM_QOS_LATENCY_ANY.
773 */
774 ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
775
776 if (val == PM_QOS_LATENCY_ANY || val < 0) {
777 ltr &= ~INTEL_LTR_REQ;
778 } else {
779 ltr |= INTEL_LTR_REQ;
780 ltr &= ~INTEL_LTR_SCALE_MASK;
781 ltr &= ~INTEL_LTR_VALUE_MASK;
782
783 if (val > INTEL_LTR_VALUE_MASK) {
784 val >>= 5;
785 if (val > INTEL_LTR_VALUE_MASK)
786 val = INTEL_LTR_VALUE_MASK;
787 ltr |= INTEL_LTR_SCALE_32US | val;
788 } else {
789 ltr |= INTEL_LTR_SCALE_1US | val;
790 }
791 }
792
793 if (ltr == intel_host->active_ltr)
794 goto out;
795
796 writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
797 writel(ltr, host->ioaddr + INTEL_IDLELTR);
798
799 /* Cache the values into the intel_host structure */
800 intel_cache_ltr(slot);
801 out:
802 pm_runtime_put_autosuspend(dev);
803 }
804
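/* LTR is not exposed on Bay Trail / Braswell controllers */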
805 static bool intel_use_ltr(struct sdhci_pci_chip *chip)
806 {
807 switch (chip->pdev->device) {
808 case PCI_DEVICE_ID_INTEL_BYT_EMMC:
809 case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
810 case PCI_DEVICE_ID_INTEL_BYT_SDIO:
811 case PCI_DEVICE_ID_INTEL_BYT_SD:
812 case PCI_DEVICE_ID_INTEL_BSW_EMMC:
813 case PCI_DEVICE_ID_INTEL_BSW_SDIO:
814 case PCI_DEVICE_ID_INTEL_BSW_SD:
815 return false;
816 default:
817 return true;
818 }
819 }
820
821 static void intel_ltr_expose(struct sdhci_pci_chip *chip)
822 {
823 struct device *dev = &chip->pdev->dev;
824
825 if (!intel_use_ltr(chip))
826 return;
827
828 dev->power.set_latency_tolerance = intel_ltr_set;
829 dev_pm_qos_expose_latency_tolerance(dev);
830 }
831
832 static void intel_ltr_hide(struct sdhci_pci_chip *chip)
833 {
834 struct device *dev = &chip->pdev->dev;
835
836 if (!intel_use_ltr(chip))
837 return;
838
839 dev_pm_qos_hide_latency_tolerance(dev);
840 dev->power.set_latency_tolerance = NULL;
841 }
842
843 static void byt_probe_slot(struct sdhci_pci_slot *slot)
844 {
845 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
846 struct device *dev = &slot->chip->pdev->dev;
847 struct mmc_host *mmc = slot->host->mmc;
848
849 byt_read_dsm(slot);
850
851 byt_ocp_setting(slot->chip->pdev);
852
853 ops->execute_tuning = intel_execute_tuning;
854 ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
855
856 device_property_read_u32(dev, "max-frequency", &mmc->f_max);
857
858 if (!mmc->slotno) {
859 slot->chip->slots[mmc->slotno] = slot;
860 intel_ltr_expose(slot->chip);
861 }
862 }
863
864 static void byt_add_debugfs(struct sdhci_pci_slot *slot)
865 {
866 struct intel_host *intel_host = sdhci_pci_priv(slot);
867 struct mmc_host *mmc = slot->host->mmc;
868 struct dentry *dir = mmc->debugfs_root;
869
870 if (!intel_use_ltr(slot->chip))
871 return;
872
873 debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
874 debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);
875
876 intel_cache_ltr(slot);
877 }
878
879 static int byt_add_host(struct sdhci_pci_slot *slot)
880 {
881 int ret = sdhci_add_host(slot->host);
882
883 if (!ret)
884 byt_add_debugfs(slot);
885 return ret;
886 }
887
888 static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
889 {
890 struct mmc_host *mmc = slot->host->mmc;
891
892 if (!mmc->slotno)
893 intel_ltr_hide(slot->chip);
894 }
895
896 static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
897 {
898 byt_probe_slot(slot);
899 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
900 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
901 MMC_CAP_CMD_DURING_TFR |
902 MMC_CAP_WAIT_WHILE_BUSY;
903 slot->hw_reset = sdhci_pci_int_hw_reset;
904 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
905 slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
906 slot->host->mmc_host_ops.select_drive_strength =
907 intel_select_drive_strength;
908 return 0;
909 }
910
911 static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
912 {
913 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
914 (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
915 dmi_match(DMI_SYS_VENDOR, "IRBIS") ||
916 dmi_match(DMI_SYS_VENDOR, "Positivo Tecnologia SA"));
917 }
918
919 static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
920 {
921 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
922 dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
923 }
924
925 static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
926 {
927 int ret = byt_emmc_probe_slot(slot);
928
929 if (!glk_broken_cqhci(slot))
930 slot->host->mmc->caps2 |= MMC_CAP2_CQE;
931
932 if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
933 if (!jsl_broken_hs400es(slot)) {
934 slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
935 slot->host->mmc_host_ops.hs400_enhanced_strobe =
936 intel_hs400_enhanced_strobe;
937 }
938 slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
939 }
940
941 return ret;
942 }
943
944 static const struct cqhci_host_ops glk_cqhci_ops = {
945 .enable = sdhci_cqe_enable,
946 .disable = sdhci_cqe_disable,
947 .dumpregs = sdhci_pci_dumpregs,
948 };
949
950 static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
951 {
952 struct device *dev = &slot->chip->pdev->dev;
953 struct sdhci_host *host = slot->host;
954 struct cqhci_host *cq_host;
955 bool dma64;
956 int ret;
957
958 ret = sdhci_setup_host(host);
959 if (ret)
960 return ret;
961
962 cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
963 if (!cq_host) {
964 ret = -ENOMEM;
965 goto cleanup;
966 }
967
968 cq_host->mmio = host->ioaddr + 0x200;
969 cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
970 cq_host->ops = &glk_cqhci_ops;
971
972 dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
973 if (dma64)
974 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
975
976 ret = cqhci_init(cq_host, host->mmc, dma64);
977 if (ret)
978 goto cleanup;
979
980 ret = __sdhci_add_host(host);
981 if (ret)
982 goto cleanup;
983
984 byt_add_debugfs(slot);
985
986 return 0;
987
988 cleanup:
989 sdhci_cleanup_host(host);
990 return ret;
991 }
992
993 #ifdef CONFIG_PM
994 #define GLK_RX_CTRL1 0x834
995 #define GLK_TUN_VAL 0x840
996 #define GLK_PATH_PLL GENMASK(13, 8)
997 #define GLK_DLY GENMASK(6, 0)
998 /* Workaround firmware failing to restore the tuning value */
999 static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
1000 {
1001 struct sdhci_pci_slot *slot = chip->slots[0];
1002 struct intel_host *intel_host = sdhci_pci_priv(slot);
1003 struct sdhci_host *host = slot->host;
1004 u32 glk_rx_ctrl1;
1005 u32 glk_tun_val;
1006 u32 dly;
1007
1008 if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
1009 return;
1010
1011 glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
1012 glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
1013
1014 if (susp) {
1015 intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
1016 intel_host->glk_tun_val = glk_tun_val;
1017 return;
1018 }
1019
1020 if (!intel_host->glk_tun_val)
1021 return;
1022
1023 if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
1024 intel_host->rpm_retune_ok = true;
1025 return;
1026 }
1027
1028 dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
1029 (intel_host->glk_tun_val << 1));
1030 if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
1031 return;
1032
1033 glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
1034 sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
1035
1036 intel_host->rpm_retune_ok = true;
1037 chip->rpm_retune = true;
1038 mmc_retune_needed(host->mmc);
1039 pr_info("%s: Requiring re-tune after rpm resume\n", mmc_hostname(host->mmc));
1040 }
1041
1042 static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
1043 {
1044 if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
1045 !chip->rpm_retune)
1046 glk_rpm_retune_wa(chip, susp);
1047 }
1048
1049 static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
1050 {
1051 glk_rpm_retune_chk(chip, true);
1052
1053 return sdhci_cqhci_runtime_suspend(chip);
1054 }
1055
1056 static int glk_runtime_resume(struct sdhci_pci_chip *chip)
1057 {
1058 glk_rpm_retune_chk(chip, false);
1059
1060 return sdhci_cqhci_runtime_resume(chip);
1061 }
1062 #endif
1063
1064 #ifdef CONFIG_ACPI
1065 static int ni_set_max_freq(struct sdhci_pci_slot *slot)
1066 {
1067 acpi_status status;
1068 unsigned long long max_freq;
1069
1070 status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
1071 "MXFQ", NULL, &max_freq);
1072 if (ACPI_FAILURE(status)) {
1073 dev_err(&slot->chip->pdev->dev,
1074 "MXFQ not found in acpi table\n");
1075 return -EINVAL;
1076 }
1077
1078 slot->host->mmc->f_max = max_freq * 1000000;
1079
1080 return 0;
1081 }
1082 #else
1083 static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
1084 {
1085 return 0;
1086 }
1087 #endif
1088
1089 static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1090 {
1091 int err;
1092
1093 byt_probe_slot(slot);
1094
1095 err = ni_set_max_freq(slot);
1096 if (err)
1097 return err;
1098
1099 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1100 MMC_CAP_WAIT_WHILE_BUSY;
1101 return 0;
1102 }
1103
1104 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1105 {
1106 byt_probe_slot(slot);
1107 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1108 MMC_CAP_WAIT_WHILE_BUSY;
1109 return 0;
1110 }
1111
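/* Record whether bus power was left on, so set_power can power off once first */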
1112 static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
1113 {
1114 struct intel_host *intel_host = sdhci_pci_priv(slot);
1115 u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
1116
1117 intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
1118 }
1119
1120 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
1121 {
1122 byt_probe_slot(slot);
1123 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
1124 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
1125 slot->cd_idx = 0;
1126 slot->cd_override_level = true;
1127 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
1128 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
1129 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
1130 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD)
1131 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
1132
1133 if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI &&
1134 slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
1135 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
1136
1137 byt_needs_pwr_off(slot);
1138
1139 return 0;
1140 }
1141
1142 #ifdef CONFIG_PM_SLEEP
1143
1144 static int byt_resume(struct sdhci_pci_chip *chip)
1145 {
1146 byt_ocp_setting(chip->pdev);
1147
1148 return sdhci_pci_resume_host(chip);
1149 }
1150
1151 #endif
1152
1153 #ifdef CONFIG_PM
1154
1155 static int byt_runtime_resume(struct sdhci_pci_chip *chip)
1156 {
1157 byt_ocp_setting(chip->pdev);
1158
1159 return sdhci_pci_runtime_resume_host(chip);
1160 }
1161
1162 #endif
1163
1164 static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
1165 #ifdef CONFIG_PM_SLEEP
1166 .resume = byt_resume,
1167 #endif
1168 #ifdef CONFIG_PM
1169 .runtime_resume = byt_runtime_resume,
1170 #endif
1171 .allow_runtime_pm = true,
1172 .probe_slot = byt_emmc_probe_slot,
1173 .add_host = byt_add_host,
1174 .remove_slot = byt_remove_slot,
1175 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1176 SDHCI_QUIRK_NO_LED,
1177 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1178 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1179 SDHCI_QUIRK2_STOP_WITH_TC,
1180 .ops = &sdhci_intel_byt_ops,
1181 .priv_size = sizeof(struct intel_host),
1182 };
1183
1184 static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
1185 .allow_runtime_pm = true,
1186 .probe_slot = glk_emmc_probe_slot,
1187 .add_host = glk_emmc_add_host,
1188 .remove_slot = byt_remove_slot,
1189 #ifdef CONFIG_PM_SLEEP
1190 .suspend = sdhci_cqhci_suspend,
1191 .resume = sdhci_cqhci_resume,
1192 #endif
1193 #ifdef CONFIG_PM
1194 .runtime_suspend = glk_runtime_suspend,
1195 .runtime_resume = glk_runtime_resume,
1196 #endif
1197 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1198 SDHCI_QUIRK_NO_LED,
1199 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1200 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1201 SDHCI_QUIRK2_STOP_WITH_TC,
1202 .ops = &sdhci_intel_glk_ops,
1203 .priv_size = sizeof(struct intel_host),
1204 };
1205
1206 static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
1207 #ifdef CONFIG_PM_SLEEP
1208 .resume = byt_resume,
1209 #endif
1210 #ifdef CONFIG_PM
1211 .runtime_resume = byt_runtime_resume,
1212 #endif
1213 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1214 SDHCI_QUIRK_NO_LED,
1215 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1216 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1217 .allow_runtime_pm = true,
1218 .probe_slot = ni_byt_sdio_probe_slot,
1219 .add_host = byt_add_host,
1220 .remove_slot = byt_remove_slot,
1221 .ops = &sdhci_intel_byt_ops,
1222 .priv_size = sizeof(struct intel_host),
1223 };
1224
1225 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
1226 #ifdef CONFIG_PM_SLEEP
1227 .resume = byt_resume,
1228 #endif
1229 #ifdef CONFIG_PM
1230 .runtime_resume = byt_runtime_resume,
1231 #endif
1232 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1233 SDHCI_QUIRK_NO_LED,
1234 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1235 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1236 .allow_runtime_pm = true,
1237 .probe_slot = byt_sdio_probe_slot,
1238 .add_host = byt_add_host,
1239 .remove_slot = byt_remove_slot,
1240 .ops = &sdhci_intel_byt_ops,
1241 .priv_size = sizeof(struct intel_host),
1242 };
1243
1244 /* DMI quirks for devices with missing or broken CD GPIO info */
1245 static const struct gpiod_lookup_table vexia_edu_atla10_cd_gpios = {
1246 .dev_id = "0000:00:12.0",
1247 .table = {
1248 GPIO_LOOKUP("INT33FC:00", 38, "cd", GPIO_ACTIVE_HIGH),
1249 { }
1250 },
1251 };
1252
1253 static const struct dmi_system_id sdhci_intel_byt_cd_gpio_override[] = {
1254 {
1255 /* Vexia Edu Atla 10 tablet 9V version */
1256 .matches = {
1257 DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
1258 DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
1259 /* Above strings are too generic, also match on BIOS date */
1260 DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
1261 },
1262 .driver_data = (void *)&vexia_edu_atla10_cd_gpios,
1263 },
1264 { }
1265 };
1266
1267 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
1268 #ifdef CONFIG_PM_SLEEP
1269 .resume = byt_resume,
1270 #endif
1271 #ifdef CONFIG_PM
1272 .runtime_resume = byt_runtime_resume,
1273 #endif
1274 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1275 SDHCI_QUIRK_NO_LED,
1276 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
1277 SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1278 SDHCI_QUIRK2_STOP_WITH_TC,
1279 .allow_runtime_pm = true,
1280 .own_cd_for_runtime_pm = true,
1281 .probe_slot = byt_sd_probe_slot,
1282 .add_host = byt_add_host,
1283 .remove_slot = byt_remove_slot,
1284 .ops = &sdhci_intel_byt_ops,
1285 .cd_gpio_override = sdhci_intel_byt_cd_gpio_override,
1286 .priv_size = sizeof(struct intel_host),
1287 };
1288
1289 /* Define Host controllers for Intel Merrifield platform */
1290 #define INTEL_MRFLD_EMMC_0 0
1291 #define INTEL_MRFLD_EMMC_1 1
1292 #define INTEL_MRFLD_SD 2
1293 #define INTEL_MRFLD_SDIO 3
1294
1295 #ifdef CONFIG_ACPI
1296 static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
1297 {
1298 struct acpi_device *device;
1299
1300 device = ACPI_COMPANION(&slot->chip->pdev->dev);
1301 if (device)
1302 acpi_device_fix_up_power_extended(device);
1303 }
1304 #else
1305 static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
1306 #endif
1307
1308 static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
1309 {
1310 unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);
1311
1312 switch (func) {
1313 case INTEL_MRFLD_EMMC_0:
1314 case INTEL_MRFLD_EMMC_1:
1315 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1316 MMC_CAP_8_BIT_DATA |
1317 MMC_CAP_1_8V_DDR;
1318 break;
1319 case INTEL_MRFLD_SD:
1320 slot->cd_idx = 0;
1321 slot->cd_override_level = true;
1322 /*
1323 * There are two PCB designs of SD card slot with the opposite
1324 * card detection sense. Quirk this out by ignoring GPIO state
1325 * completely in the custom ->get_cd() callback.
1326 */
1327 slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
1328 slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1329 break;
1330 case INTEL_MRFLD_SDIO:
1331 /* Advertise 2.0v for compatibility with the SDIO card's OCR */
1332 slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
1333 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1334 MMC_CAP_POWER_OFF_CARD;
1335 break;
1336 default:
1337 return -ENODEV;
1338 }
1339
1340 intel_mrfld_mmc_fix_up_power_slot(slot);
1341 return 0;
1342 }
1343
1344 static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
1345 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1346 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
1347 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1348 .allow_runtime_pm = true,
1349 .probe_slot = intel_mrfld_mmc_probe_slot,
1350 };
1351
1352 #define JMB388_SAMPLE_COUNT 5
1353
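/*
 * The JMB388 write-protect state is unstable: sample it several times and
 * report read-only only if a majority of the samples say so.
 */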
1354 static int jmicron_jmb388_get_ro(struct mmc_host *mmc)
1355 {
1356 int i, ro_count;
1357
1358 ro_count = 0;
1359 for (i = 0; i < JMB388_SAMPLE_COUNT; i++) {
1360 if (sdhci_get_ro(mmc) > 0) {
1361 if (++ro_count > JMB388_SAMPLE_COUNT / 2)
1362 return 1;
1363 }
1364 msleep(30);
1365 }
1366 return 0;
1367 }
1368
1369 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
1370 {
1371 u8 scratch;
1372 int ret;
1373
1374 ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
1375 if (ret)
1376 goto fail;
1377
1378 /*
1379 * Turn PMOS on [bit 0], set over current detection to 2.4 V
1380 * [bit 1:2] and enable over current debouncing [bit 6].
1381 */
1382 if (on)
1383 scratch |= 0x47;
1384 else
1385 scratch &= ~0x47;
1386
1387 ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
1388
1389 fail:
1390 return pcibios_err_to_errno(ret);
1391 }
1392
1393 static int jmicron_probe(struct sdhci_pci_chip *chip)
1394 {
1395 int ret;
1396 u16 mmcdev = 0;
1397
1398 if (chip->pdev->revision == 0) {
1399 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
1400 SDHCI_QUIRK_32BIT_DMA_SIZE |
1401 SDHCI_QUIRK_32BIT_ADMA_SIZE |
1402 SDHCI_QUIRK_RESET_AFTER_REQUEST |
1403 SDHCI_QUIRK_BROKEN_SMALL_PIO;
1404 }
1405
1406 /*
1407 * JMicron chips can have two interfaces to the same hardware
1408 * in order to work around limitations in Microsoft's driver.
1409 * We need to make sure we only bind to one of them.
1410 *
1411 * This code assumes two things:
1412 *
1413 * 1. The PCI code adds subfunctions in order.
1414 *
1415 * 2. The MMC interface has a lower subfunction number
1416 * than the SD interface.
1417 */
1418 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
1419 mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
1420 else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
1421 mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
1422
1423 if (mmcdev) {
1424 struct pci_dev *sd_dev;
1425
1426 sd_dev = NULL;
1427 while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
1428 mmcdev, sd_dev)) != NULL) {
1429 if ((PCI_SLOT(chip->pdev->devfn) ==
1430 PCI_SLOT(sd_dev->devfn)) &&
1431 (chip->pdev->bus == sd_dev->bus))
1432 break;
1433 }
1434
1435 if (sd_dev) {
1436 pci_dev_put(sd_dev);
1437 dev_info(&chip->pdev->dev, "Refusing to bind to "
1438 "secondary interface.\n");
1439 return -ENODEV;
1440 }
1441 }
1442
1443 /*
1444 * JMicron chips need a bit of a nudge to enable the power
1445 * output pins.
1446 */
1447 ret = jmicron_pmos(chip, 1);
1448 if (ret) {
1449 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1450 return ret;
1451 }
1452
1453 return 0;
1454 }
1455
1456 static void jmicron_enable_mmc(struct sdhci_host *host, int on)
1457 {
1458 u8 scratch;
1459
1460 scratch = readb(host->ioaddr + 0xC0);
1461
1462 if (on)
1463 scratch |= 0x01;
1464 else
1465 scratch &= ~0x01;
1466
1467 writeb(scratch, host->ioaddr + 0xC0);
1468 }
1469
1470 static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
1471 {
1472 if (slot->chip->pdev->revision == 0) {
1473 u16 version;
1474
1475 version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
1476 version = (version & SDHCI_VENDOR_VER_MASK) >>
1477 SDHCI_VENDOR_VER_SHIFT;
1478
1479 /*
1480 * Older versions of the chip have lots of nasty glitches
1481 * in the ADMA engine. It's best just to avoid it
1482 * completely.
1483 */
1484 if (version < 0xAC)
1485 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1486 }
1487
1488 /* JM388 MMC doesn't support 1.8V while SD supports it */
1489 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1490 slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
1491 MMC_VDD_29_30 | MMC_VDD_30_31 |
1492 MMC_VDD_165_195; /* allow 1.8V */
1493 slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
1494 MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
1495 }
1496
1497 /*
1498 * The secondary interface requires a bit set to get the
1499 * interrupts.
1500 */
1501 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1502 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1503 jmicron_enable_mmc(slot->host, 1);
1504
1505 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
1506
1507 /* Handle unstable RO-detection on JM388 chips */
1508 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
1509 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1510 slot->host->mmc_host_ops.get_ro = jmicron_jmb388_get_ro;
1511
1512 return 0;
1513 }
1514
1515 static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
1516 {
1517 if (dead)
1518 return;
1519
1520 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1521 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1522 jmicron_enable_mmc(slot->host, 0);
1523 }
1524
1525 #ifdef CONFIG_PM_SLEEP
1526 static int jmicron_suspend(struct sdhci_pci_chip *chip)
1527 {
1528 int i, ret;
1529
1530 ret = sdhci_pci_suspend_host(chip);
1531 if (ret)
1532 return ret;
1533
1534 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1535 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1536 for (i = 0; i < chip->num_slots; i++)
1537 jmicron_enable_mmc(chip->slots[i]->host, 0);
1538 }
1539
1540 return 0;
1541 }
1542
1543 static int jmicron_resume(struct sdhci_pci_chip *chip)
1544 {
1545 int ret, i;
1546
1547 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1548 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1549 for (i = 0; i < chip->num_slots; i++)
1550 jmicron_enable_mmc(chip->slots[i]->host, 1);
1551 }
1552
1553 ret = jmicron_pmos(chip, 1);
1554 if (ret) {
1555 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1556 return ret;
1557 }
1558
1559 return sdhci_pci_resume_host(chip);
1560 }
1561 #endif
1562
1563 static const struct sdhci_pci_fixes sdhci_jmicron = {
1564 .probe = jmicron_probe,
1565
1566 .probe_slot = jmicron_probe_slot,
1567 .remove_slot = jmicron_remove_slot,
1568
1569 #ifdef CONFIG_PM_SLEEP
1570 .suspend = jmicron_suspend,
1571 .resume = jmicron_resume,
1572 #endif
1573 };
1574
1575 /* SysKonnect CardBus2SDIO extra registers */
1576 #define SYSKT_CTRL 0x200
1577 #define SYSKT_RDFIFO_STAT 0x204
1578 #define SYSKT_WRFIFO_STAT 0x208
1579 #define SYSKT_POWER_DATA 0x20c
1580 #define SYSKT_POWER_330 0xef
1581 #define SYSKT_POWER_300 0xf8
1582 #define SYSKT_POWER_184 0xcc
1583 #define SYSKT_POWER_CMD 0x20d
1584 #define SYSKT_POWER_START (1 << 7)
1585 #define SYSKT_POWER_STATUS 0x20e
1586 #define SYSKT_POWER_STATUS_OK (1 << 0)
1587 #define SYSKT_BOARD_REV 0x210
1588 #define SYSKT_CHIP_REV 0x211
1589 #define SYSKT_CONF_DATA 0x212
1590 #define SYSKT_CONF_DATA_1V8 (1 << 2)
1591 #define SYSKT_CONF_DATA_2V5 (1 << 1)
1592 #define SYSKT_CONF_DATA_3V3 (1 << 0)
1593
1594 static int syskt_probe(struct sdhci_pci_chip *chip)
1595 {
1596 if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
1597 chip->pdev->class &= ~0x0000FF;
1598 chip->pdev->class |= PCI_SDHCI_IFDMA;
1599 }
1600 return 0;
1601 }
1602
1603 static int syskt_probe_slot(struct sdhci_pci_slot *slot)
1604 {
1605 int tm, ps;
1606
1607 u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
1608 u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
1609 dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
1610 "board rev %d.%d, chip rev %d.%d\n",
1611 board_rev >> 4, board_rev & 0xf,
1612 chip_rev >> 4, chip_rev & 0xf);
1613 if (chip_rev >= 0x20)
1614 slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
1615
1616 writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
1617 writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
1618 udelay(50);
1619 tm = 10; /* Wait max 1 ms */
1620 do {
1621 ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
1622 if (ps & SYSKT_POWER_STATUS_OK)
1623 break;
1624 udelay(100);
1625 } while (--tm);
1626 if (!tm) {
1627 dev_err(&slot->chip->pdev->dev,
1628 "power regulator never stabilized\n");
1629 writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
1630 return -ENODEV;
1631 }
1632
1633 return 0;
1634 }
1635
1636 static const struct sdhci_pci_fixes sdhci_syskt = {
1637 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
1638 .probe = syskt_probe,
1639 .probe_slot = syskt_probe_slot,
1640 };
1641
1642 static int via_probe(struct sdhci_pci_chip *chip)
1643 {
1644 if (chip->pdev->revision == 0x10)
1645 chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
1646
1647 return 0;
1648 }
1649
1650 static const struct sdhci_pci_fixes sdhci_via = {
1651 .probe = via_probe,
1652 };
1653
1654 static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
1655 {
1656 slot->host->mmc->caps2 |= MMC_CAP2_HS200;
1657 return 0;
1658 }
1659
1660 static const struct sdhci_pci_fixes sdhci_rtsx = {
1661 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1662 SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
1663 SDHCI_QUIRK2_BROKEN_DDR50,
1664 .probe_slot = rtsx_probe_slot,
1665 };
1666
1667 /* AMD chipset generation */
1668 enum amd_chipset_gen {
1669 AMD_CHIPSET_BEFORE_ML,
1670 AMD_CHIPSET_CZ,
1671 AMD_CHIPSET_NL,
1672 AMD_CHIPSET_UNKNOWN,
1673 };
1674
1675 /* AMD registers */
1676 #define AMD_SD_AUTO_PATTERN 0xB8
1677 #define AMD_MSLEEP_DURATION 4
1678 #define AMD_SD_MISC_CONTROL 0xD0
1679 #define AMD_MAX_TUNE_VALUE 0x0B
1680 #define AMD_AUTO_TUNE_SEL 0x10800
1681 #define AMD_FIFO_PTR 0x30
1682 #define AMD_BIT_MASK 0x1F
1683
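/* Reset the tuning circuit by setting and then clearing SDHCI_CTRL_EXEC_TUNING */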
1684 static void amd_tuning_reset(struct sdhci_host *host)
1685 {
1686 unsigned int val;
1687
1688 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1689 val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
1690 sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1691
1692 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1693 val &= ~SDHCI_CTRL_EXEC_TUNING;
1694 sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1695 }
1696
1697 static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
1698 {
1699 unsigned int val;
1700
1701 pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
1702 val &= ~AMD_BIT_MASK;
1703 val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
1704 pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
1705 }
1706
1707 static void amd_enable_manual_tuning(struct pci_dev *pdev)
1708 {
1709 unsigned int val;
1710
1711 pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
1712 val |= AMD_FIFO_PTR;
1713 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
1714 }
1715
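/*
 * Manual HS200 tuning: step through the 12 phase settings, track the longest
 * run of passing phases, then select the centre of that window.
 */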
1716 static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
1717 {
1718 struct sdhci_pci_slot *slot = sdhci_priv(host);
1719 struct pci_dev *pdev = slot->chip->pdev;
1720 u8 valid_win = 0;
1721 u8 valid_win_max = 0;
1722 u8 valid_win_end = 0;
1723 u8 ctrl, tune_around;
1724
1725 amd_tuning_reset(host);
1726
1727 for (tune_around = 0; tune_around < 12; tune_around++) {
1728 amd_config_tuning_phase(pdev, tune_around);
1729
1730 if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1731 valid_win = 0;
1732 msleep(AMD_MSLEEP_DURATION);
1733 ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
1734 sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
1735 } else if (++valid_win > valid_win_max) {
1736 valid_win_max = valid_win;
1737 valid_win_end = tune_around;
1738 }
1739 }
1740
1741 if (!valid_win_max) {
1742 dev_err(&pdev->dev, "no tuning point found\n");
1743 return -EIO;
1744 }
1745
1746 amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);
1747
1748 amd_enable_manual_tuning(pdev);
1749
1750 host->mmc->retune_period = 0;
1751
1752 return 0;
1753 }
1754
1755 static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
1756 {
1757 struct sdhci_host *host = mmc_priv(mmc);
1758
1759 /* AMD requires custom HS200 tuning */
1760 if (host->timing == MMC_TIMING_MMC_HS200)
1761 return amd_execute_tuning_hs200(host, opcode);
1762
1763 /* Otherwise perform standard SDHCI tuning */
1764 return sdhci_execute_tuning(mmc, opcode);
1765 }
1766
1767 static int amd_probe_slot(struct sdhci_pci_slot *slot)
1768 {
1769 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
1770
1771 ops->execute_tuning = amd_execute_tuning;
1772
1773 return 0;
1774 }
1775
1776 static int amd_probe(struct sdhci_pci_chip *chip)
1777 {
1778 struct pci_dev *smbus_dev;
1779 enum amd_chipset_gen gen;
1780
1781 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1782 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
1783 if (smbus_dev) {
1784 gen = AMD_CHIPSET_BEFORE_ML;
1785 } else {
1786 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1787 PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
1788 if (smbus_dev) {
1789 if (smbus_dev->revision < 0x51)
1790 gen = AMD_CHIPSET_CZ;
1791 else
1792 gen = AMD_CHIPSET_NL;
1793 } else {
1794 gen = AMD_CHIPSET_UNKNOWN;
1795 }
1796 }
1797
1798 pci_dev_put(smbus_dev);
1799
1800 if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
1801 chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
1802
1803 return 0;
1804 }
1805
1806 static u32 sdhci_read_present_state(struct sdhci_host *host)
1807 {
1808 return sdhci_readl(host, SDHCI_PRESENT_STATE);
1809 }
1810
1811 static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
1812 {
1813 struct sdhci_pci_slot *slot = sdhci_priv(host);
1814 struct pci_dev *pdev = slot->chip->pdev;
1815 u32 present_state;
1816
1817 /*
1818 * SDHC 0x7906 requires a hard reset to clear all internal state.
1819 * Otherwise it can get into a bad state where the DATA lines are always
1820 * read as zeros.
1821 */
1822 if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
1823 pci_clear_master(pdev);
1824
1825 pci_save_state(pdev);
1826
1827 pci_set_power_state(pdev, PCI_D3cold);
1828 pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
1829 pdev->current_state);
1830 pci_set_power_state(pdev, PCI_D0);
1831
1832 pci_restore_state(pdev);
1833
1834 /*
1835 * SDHCI_RESET_ALL says the card detect logic should not be
1836 * reset, but since we need to reset the entire controller
1837 * we should wait until the card detect logic has stabilized.
1838 *
1839 * This normally takes about 40ms.
1840 */
1841 readx_poll_timeout(
1842 sdhci_read_present_state,
1843 host,
1844 present_state,
1845 present_state & SDHCI_CD_STABLE,
1846 10000,
1847 100000
1848 );
1849 }
1850
1851 return sdhci_reset(host, mask);
1852 }
1853
1854 static const struct sdhci_ops amd_sdhci_pci_ops = {
1855 .set_clock = sdhci_set_clock,
1856 .enable_dma = sdhci_pci_enable_dma,
1857 .set_bus_width = sdhci_set_bus_width,
1858 .reset = amd_sdhci_reset,
1859 .set_uhs_signaling = sdhci_set_uhs_signaling,
1860 };
1861
1862 static const struct sdhci_pci_fixes sdhci_amd = {
1863 .probe = amd_probe,
1864 .ops = &amd_sdhci_pci_ops,
1865 .probe_slot = amd_probe_slot,
1866 };
1867
static const struct pci_device_id pci_ids[] = {
	SDHCI_PCI_DEVICE(RICOH, R5C822, ricoh),
	SDHCI_PCI_DEVICE(RICOH, R5C843, ricoh_mmc),
	SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
	SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
	SDHCI_PCI_DEVICE(ENE, CB712_SD, ene_712),
	SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
	SDHCI_PCI_DEVICE(ENE, CB714_SD, ene_714),
	SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
	SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
	SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD, jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB388_SD, jmicron),
	SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
	SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
	SDHCI_PCI_DEVICE(VIA, 95D0, via),
	SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
	SDHCI_PCI_DEVICE(INTEL, QRK_SD, intel_qrk),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD0, intel_mrst_hc0),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD1, intel_mrst_hc1_hc2),
	SDHCI_PCI_DEVICE(INTEL, MRST_SD2, intel_mrst_hc1_hc2),
	SDHCI_PCI_DEVICE(INTEL, MFD_SD, intel_mfd_sd),
	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
	SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC, intel_byt_emmc),
	SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BYT_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BSW_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BSW_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BSW_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
	SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
	SDHCI_PCI_DEVICE(INTEL, SPT_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, BXTM_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, APL_EMMC, intel_byt_emmc),
	SDHCI_PCI_DEVICE(INTEL, APL_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, APL_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, GLK_SDIO, intel_byt_sdio),
	SDHCI_PCI_DEVICE(INTEL, GLK_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(O2, 8120, o2),
	SDHCI_PCI_DEVICE(O2, 8220, o2),
	SDHCI_PCI_DEVICE(O2, 8221, o2),
	SDHCI_PCI_DEVICE(O2, 8320, o2),
	SDHCI_PCI_DEVICE(O2, 8321, o2),
	SDHCI_PCI_DEVICE(O2, FUJIN2, o2),
	SDHCI_PCI_DEVICE(O2, SDS0, o2),
	SDHCI_PCI_DEVICE(O2, SDS1, o2),
	SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
	SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
	SDHCI_PCI_DEVICE(O2, GG8_9860, o2),
	SDHCI_PCI_DEVICE(O2, GG8_9861, o2),
	SDHCI_PCI_DEVICE(O2, GG8_9862, o2),
	SDHCI_PCI_DEVICE(O2, GG8_9863, o2),
	SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
	SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
	SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
	SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
	SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
	SDHCI_PCI_DEVICE(GLI, 9767, gl9767),
	SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
	/* Generic SD host controller */
	{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
	{ /* end: all zeroes */ },
};

MODULE_DEVICE_TABLE(pci, pci_ids);

/*****************************************************************************\
 *                                                                           *
 * SDHCI core callbacks                                                      *
 *                                                                           *
\*****************************************************************************/

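/*
 * Enable bus mastering for DMA. Warn if the interface byte of the class code
 * does not advertise DMA support even though the host will use SDMA anyway.
 */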
int sdhci_pci_enable_dma(struct sdhci_host *host)
{
	struct sdhci_pci_slot *slot;
	struct pci_dev *pdev;

	slot = sdhci_priv(host);
	pdev = slot->chip->pdev;

	if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
		((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
		(host->flags & SDHCI_USE_SDMA)) {
		dev_warn(&pdev->dev, "Will use DMA mode even though HW "
			"doesn't fully claim to support it.\n");
	}

	pci_set_master(pdev);

	return 0;
}

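/* Dispatch a card hardware reset to the slot-specific handler, if any. */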
static void sdhci_pci_hw_reset(struct sdhci_host *host)
{
	struct sdhci_pci_slot *slot = sdhci_priv(host);

	if (slot->hw_reset)
		slot->hw_reset(host);
}

static const struct sdhci_ops sdhci_pci_ops = {
	.set_clock = sdhci_set_clock,
	.enable_dma = sdhci_pci_enable_dma,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
	.hw_reset = sdhci_pci_hw_reset,
};

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM_SLEEP
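/* System sleep: chip-specific fixes may override the default host suspend/resume paths. */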
static int sdhci_pci_suspend(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->suspend)
		return chip->fixes->suspend(chip);

	return sdhci_pci_suspend_host(chip);
}

static int sdhci_pci_resume(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->resume)
		return chip->fixes->resume(chip);

	return sdhci_pci_resume_host(chip);
}
#endif

#ifdef CONFIG_PM
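/* Runtime PM: likewise, chip-specific fixes take precedence over the default handlers. */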
static int sdhci_pci_runtime_suspend(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->runtime_suspend)
		return chip->fixes->runtime_suspend(chip);

	return sdhci_pci_runtime_suspend_host(chip);
}

static int sdhci_pci_runtime_resume(struct device *dev)
{
	struct sdhci_pci_chip *chip = dev_get_drvdata(dev);

	if (!chip)
		return 0;

	if (chip->fixes && chip->fixes->runtime_resume)
		return chip->fixes->runtime_resume(chip);

	return sdhci_pci_runtime_resume_host(chip);
}
#endif

static const struct dev_pm_ops sdhci_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
	SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
			   sdhci_pci_runtime_resume, NULL)
};

/*****************************************************************************\
 *                                                                           *
 * Device probing/removal                                                    *
 *                                                                           *
\*****************************************************************************/

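/*
 * Some boards need a DMI-matched GPIO lookup table for card detect. The table
 * supplied via driver_data is duplicated (including its terminating entry) and
 * registered so that mmc_gpiod_request_cd() can find the "cd" GPIO.
 */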
static struct gpiod_lookup_table *sdhci_pci_add_gpio_lookup_table(
	struct sdhci_pci_chip *chip)
{
	struct gpiod_lookup_table *cd_gpio_lookup_table;
	const struct dmi_system_id *dmi_id = NULL;
	size_t count;

	if (chip->fixes && chip->fixes->cd_gpio_override)
		dmi_id = dmi_first_match(chip->fixes->cd_gpio_override);

	if (!dmi_id)
		return NULL;

	cd_gpio_lookup_table = dmi_id->driver_data;
	for (count = 0; cd_gpio_lookup_table->table[count].key; count++)
		;

	cd_gpio_lookup_table = kmemdup(dmi_id->driver_data,
				       /* count + 1 terminating entry */
				       struct_size(cd_gpio_lookup_table, table, count + 1),
				       GFP_KERNEL);
	if (!cd_gpio_lookup_table)
		return ERR_PTR(-ENOMEM);

	gpiod_add_lookup_table(cd_gpio_lookup_table);
	return cd_gpio_lookup_table;
}

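/* Unregister and free a lookup table created by the helper above. */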
static void sdhci_pci_remove_gpio_lookup_table(struct gpiod_lookup_table *lookup_table)
{
	if (lookup_table) {
		gpiod_remove_lookup_table(lookup_table);
		kfree(lookup_table);
	}
}

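/*
 * Probe one slot: validate the BAR and interface byte, allocate the sdhci
 * host, map the registers, apply chip-specific fixes, set up card-detect
 * wakeup, and register the host with the MMC core.
 */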
static struct sdhci_pci_slot *sdhci_pci_probe_slot(
	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
	int slotno)
{
	struct sdhci_pci_slot *slot;
	struct sdhci_host *host;
	int ret, bar = first_bar + slotno;
	size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
		return ERR_PTR(-ENODEV);
	}

	if (pci_resource_len(pdev, bar) < 0x100) {
		dev_err(&pdev->dev, "Invalid iomem size. You may "
			"experience problems.\n");
	}

	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
		return ERR_PTR(-ENODEV);
	}

	host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
	if (IS_ERR(host)) {
		dev_err(&pdev->dev, "cannot allocate host\n");
		return ERR_CAST(host);
	}

	slot = sdhci_priv(host);

	slot->chip = chip;
	slot->host = host;
	slot->cd_idx = -1;

	host->hw_name = "PCI";
	host->ops = chip->fixes && chip->fixes->ops ?
		    chip->fixes->ops :
		    &sdhci_pci_ops;
	host->quirks = chip->quirks;
	host->quirks2 = chip->quirks2;

	host->irq = pdev->irq;

	ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
	if (ret) {
		dev_err(&pdev->dev, "cannot request region\n");
		goto cleanup;
	}

	host->ioaddr = pcim_iomap_table(pdev)[bar];

	if (chip->fixes && chip->fixes->probe_slot) {
		ret = chip->fixes->probe_slot(slot);
		if (ret)
			goto cleanup;
	}

	host->mmc->pm_caps = MMC_PM_KEEP_POWER;
	host->mmc->slotno = slotno;
	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;

	if (device_can_wakeup(&pdev->dev))
		host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (host->mmc->caps & MMC_CAP_CD_WAKE)
		device_init_wakeup(&pdev->dev, true);

	if (slot->cd_idx >= 0) {
		struct gpiod_lookup_table *cd_gpio_lookup_table;

		cd_gpio_lookup_table = sdhci_pci_add_gpio_lookup_table(chip);
		if (IS_ERR(cd_gpio_lookup_table)) {
			ret = PTR_ERR(cd_gpio_lookup_table);
			goto remove;
		}

		ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
					   slot->cd_override_level, 0);

		sdhci_pci_remove_gpio_lookup_table(cd_gpio_lookup_table);

		if (ret && ret != -EPROBE_DEFER)
			ret = mmc_gpiod_request_cd(host->mmc, NULL,
						   slot->cd_idx,
						   slot->cd_override_level,
						   0);
		if (ret == -EPROBE_DEFER)
			goto remove;

		if (ret) {
			dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
			slot->cd_idx = -1;
		}
	}

	if (chip->fixes && chip->fixes->add_host)
		ret = chip->fixes->add_host(slot);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto remove;

	/*
	 * Check if the chip needs a separate GPIO for card detect to wake up
	 * from runtime suspend. If it is not there, don't allow runtime PM.
	 */
	if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
		chip->allow_runtime_pm = false;

	return slot;

remove:
	if (chip->fixes && chip->fixes->remove_slot)
		chip->fixes->remove_slot(slot, 0);

cleanup:
	sdhci_free_host(host);

	return ERR_PTR(ret);
}

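/* Tear down one slot; treat the controller as dead if its registers read back as all ones. */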
static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
{
	int dead;
	u32 scratch;

	dead = 0;
	scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
	if (scratch == (u32)-1)
		dead = 1;

	sdhci_remove_host(slot->host, dead);

	if (slot->chip->fixes && slot->chip->fixes->remove_slot)
		slot->chip->fixes->remove_slot(slot, dead);

	sdhci_free_host(slot->host);
}

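/* Opt the device in to runtime PM with a 50 ms autosuspend delay. */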
static void sdhci_pci_runtime_pm_allow(struct device *dev)
{
	pm_suspend_ignore_children(dev, 1);
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	/* Stay active until mmc core scans for a card */
	pm_runtime_put_noidle(dev);
}

static void sdhci_pci_runtime_pm_forbid(struct device *dev)
{
	pm_runtime_forbid(dev);
	pm_runtime_get_noresume(dev);
}

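/*
 * Main probe: read the slot count and first BAR from PCI_SLOT_INFO, apply the
 * matching fixes, then probe each slot in turn, unwinding on failure.
 */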
static int sdhci_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct sdhci_pci_chip *chip;
	struct sdhci_pci_slot *slot;

	u8 slots, first_bar;
	int ret, i;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return pcibios_err_to_errno(ret);

	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);

	BUG_ON(slots > MAX_SLOTS);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return pcibios_err_to_errno(ret);

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;

	if (first_bar > 5) {
		dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->pdev = pdev;
	chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
	if (chip->fixes) {
		chip->quirks = chip->fixes->quirks;
		chip->quirks2 = chip->fixes->quirks2;
		chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
	}
	chip->num_slots = slots;
	chip->pm_retune = true;
	chip->rpm_retune = true;

	pci_set_drvdata(pdev, chip);

	if (chip->fixes && chip->fixes->probe) {
		ret = chip->fixes->probe(chip);
		if (ret)
			return ret;
	}

	slots = chip->num_slots;	/* Quirk may have changed this */

	for (i = 0; i < slots; i++) {
		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
		if (IS_ERR(slot)) {
			for (i--; i >= 0; i--)
				sdhci_pci_remove_slot(chip->slots[i]);
			return PTR_ERR(slot);
		}

		chip->slots[i] = slot;
	}

	if (chip->allow_runtime_pm)
		sdhci_pci_runtime_pm_allow(&pdev->dev);

	return 0;
}

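/* Remove all slots and revoke runtime PM before the device goes away. */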
static void sdhci_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);

	if (chip->allow_runtime_pm)
		sdhci_pci_runtime_pm_forbid(&pdev->dev);

	for (i = 0; i < chip->num_slots; i++)
		sdhci_pci_remove_slot(chip->slots[i]);
}

static struct pci_driver sdhci_driver = {
	.name = "sdhci-pci",
	.id_table = pci_ids,
	.probe = sdhci_pci_probe,
	.remove = sdhci_pci_remove,
	.driver = {
		.pm = &sdhci_pci_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_pci_driver(sdhci_driver);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
MODULE_LICENSE("GPL");