1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* linux/drivers/mmc/host/sdhci-pci.c - SDHCI on PCI bus interface
3 *
4 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
5 *
6 * Thanks to the following companies for their support:
7 *
8 * - JMicron (hardware and technical support)
9 */
10
11 #include <linux/bitfield.h>
12 #include <linux/string.h>
13 #include <linux/delay.h>
14 #include <linux/highmem.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h>
19 #include <linux/device.h>
20 #include <linux/mmc/host.h>
21 #include <linux/mmc/mmc.h>
22 #include <linux/scatterlist.h>
23 #include <linux/io.h>
24 #include <linux/iopoll.h>
25 #include <linux/gpio.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/pm_qos.h>
28 #include <linux/debugfs.h>
29 #include <linux/mmc/slot-gpio.h>
30 #include <linux/mmc/sdhci-pci-data.h>
31 #include <linux/acpi.h>
32 #include <linux/dmi.h>
33
34 #ifdef CONFIG_X86
35 #include <asm/iosf_mbi.h>
36 #endif
37
38 #include "cqhci.h"
39
40 #include "sdhci.h"
41 #include "sdhci-pci.h"
42
43 static void sdhci_pci_hw_reset(struct sdhci_host *host);
44
45 #ifdef CONFIG_PM_SLEEP
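/*
 * Decide whether the PCI device should be a wakeup source: enable wakeup when
 * a slot wants to keep power and use SDIO IRQ wakeups, and only disable it
 * when no slot relies on card-detect wake.
 */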
static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
47 {
48 mmc_pm_flag_t pm_flags = 0;
49 bool cap_cd_wake = false;
50 int i;
51
52 for (i = 0; i < chip->num_slots; i++) {
53 struct sdhci_pci_slot *slot = chip->slots[i];
54
55 if (slot) {
56 pm_flags |= slot->host->mmc->pm_flags;
57 if (slot->host->mmc->caps & MMC_CAP_CD_WAKE)
58 cap_cd_wake = true;
59 }
60 }
61
62 if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ))
63 return device_wakeup_enable(&chip->pdev->dev);
64 else if (!cap_cd_wake)
65 return device_wakeup_disable(&chip->pdev->dev);
66
67 return 0;
68 }
69
static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
71 {
72 int i, ret;
73
74 sdhci_pci_init_wakeup(chip);
75
76 for (i = 0; i < chip->num_slots; i++) {
77 struct sdhci_pci_slot *slot = chip->slots[i];
78 struct sdhci_host *host;
79
80 if (!slot)
81 continue;
82
83 host = slot->host;
84
85 if (chip->pm_retune && host->tuning_mode != SDHCI_TUNING_MODE_3)
86 mmc_retune_needed(host->mmc);
87
88 ret = sdhci_suspend_host(host);
89 if (ret)
90 goto err_pci_suspend;
91
92 if (device_may_wakeup(&chip->pdev->dev))
93 mmc_gpio_set_cd_wake(host->mmc, true);
94 }
95
96 return 0;
97
98 err_pci_suspend:
99 while (--i >= 0)
100 sdhci_resume_host(chip->slots[i]->host);
101 return ret;
102 }
103
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
105 {
106 struct sdhci_pci_slot *slot;
107 int i, ret;
108
109 for (i = 0; i < chip->num_slots; i++) {
110 slot = chip->slots[i];
111 if (!slot)
112 continue;
113
114 ret = sdhci_resume_host(slot->host);
115 if (ret)
116 return ret;
117
118 mmc_gpio_set_cd_wake(slot->host->mmc, false);
119 }
120
121 return 0;
122 }
123
static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
125 {
126 int ret;
127
128 ret = cqhci_suspend(chip->slots[0]->host->mmc);
129 if (ret)
130 return ret;
131
132 return sdhci_pci_suspend_host(chip);
133 }
134
static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
136 {
137 int ret;
138
139 ret = sdhci_pci_resume_host(chip);
140 if (ret)
141 return ret;
142
143 return cqhci_resume(chip->slots[0]->host->mmc);
144 }
145 #endif
146
147 #ifdef CONFIG_PM
static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
149 {
150 struct sdhci_pci_slot *slot;
151 struct sdhci_host *host;
152 int i, ret;
153
154 for (i = 0; i < chip->num_slots; i++) {
155 slot = chip->slots[i];
156 if (!slot)
157 continue;
158
159 host = slot->host;
160
161 ret = sdhci_runtime_suspend_host(host);
162 if (ret)
163 goto err_pci_runtime_suspend;
164
165 if (chip->rpm_retune &&
166 host->tuning_mode != SDHCI_TUNING_MODE_3)
167 mmc_retune_needed(host->mmc);
168 }
169
170 return 0;
171
172 err_pci_runtime_suspend:
173 while (--i >= 0)
174 sdhci_runtime_resume_host(chip->slots[i]->host, 0);
175 return ret;
176 }
177
static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
179 {
180 struct sdhci_pci_slot *slot;
181 int i, ret;
182
183 for (i = 0; i < chip->num_slots; i++) {
184 slot = chip->slots[i];
185 if (!slot)
186 continue;
187
188 ret = sdhci_runtime_resume_host(slot->host, 0);
189 if (ret)
190 return ret;
191 }
192
193 return 0;
194 }
195
static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
197 {
198 int ret;
199
200 ret = cqhci_suspend(chip->slots[0]->host->mmc);
201 if (ret)
202 return ret;
203
204 return sdhci_pci_runtime_suspend_host(chip);
205 }
206
static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
208 {
209 int ret;
210
211 ret = sdhci_pci_runtime_resume_host(chip);
212 if (ret)
213 return ret;
214
215 return cqhci_resume(chip->slots[0]->host->mmc);
216 }
217 #endif
218
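/*
 * If the interrupt is not CQE related, hand the mask back so the standard
 * SDHCI handler processes it; otherwise forward it to cqhci_irq().
 */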
static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
220 {
221 int cmd_error = 0;
222 int data_error = 0;
223
224 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
225 return intmask;
226
227 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
228
229 return 0;
230 }
231
static void sdhci_pci_dumpregs(struct mmc_host *mmc)
233 {
234 sdhci_dumpregs(mmc_priv(mmc));
235 }
236
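/* Deactivate the command queue engine before a full controller reset */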
static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask)
238 {
239 if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
240 host->mmc->cqe_private)
241 cqhci_deactivate(host->mmc);
242 sdhci_reset(host, mask);
243 }
244
/*****************************************************************************\
 *                                                                           *
 * Hardware specific quirk handling                                          *
 *                                                                           *
\*****************************************************************************/
250
static int ricoh_probe(struct sdhci_pci_chip *chip)
252 {
253 if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
254 chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
255 chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
256 return 0;
257 }
258
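/*
 * The controller does not report its capabilities, so supply them here
 * (paired with SDHCI_QUIRK_MISSING_CAPS below).
 */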
static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)
260 {
261 slot->host->caps =
262 FIELD_PREP(SDHCI_TIMEOUT_CLK_MASK, 0x21) |
263 FIELD_PREP(SDHCI_CLOCK_BASE_MASK, 0x21) |
264 SDHCI_TIMEOUT_CLK_UNIT |
265 SDHCI_CAN_VDD_330 |
266 SDHCI_CAN_DO_HISPD |
267 SDHCI_CAN_DO_SDMA;
268 return 0;
269 }
270
271 #ifdef CONFIG_PM_SLEEP
static int ricoh_mmc_resume(struct sdhci_pci_chip *chip)
273 {
	/*
	 * Apply a delay to allow the controller to settle, otherwise it
	 * becomes confused if the card state changed during suspend.
	 */
277 msleep(500);
278 return sdhci_pci_resume_host(chip);
279 }
280 #endif
281
282 static const struct sdhci_pci_fixes sdhci_ricoh = {
283 .probe = ricoh_probe,
284 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
285 SDHCI_QUIRK_FORCE_DMA |
286 SDHCI_QUIRK_CLOCK_BEFORE_RESET,
287 };
288
289 static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
290 .probe_slot = ricoh_mmc_probe_slot,
291 #ifdef CONFIG_PM_SLEEP
292 .resume = ricoh_mmc_resume,
293 #endif
294 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
295 SDHCI_QUIRK_CLOCK_BEFORE_RESET |
296 SDHCI_QUIRK_NO_CARD_NO_RESET |
297 SDHCI_QUIRK_MISSING_CAPS
298 };
299
300 static const struct sdhci_pci_fixes sdhci_ene_712 = {
301 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
302 SDHCI_QUIRK_BROKEN_DMA,
303 };
304
305 static const struct sdhci_pci_fixes sdhci_ene_714 = {
306 .quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
307 SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
308 SDHCI_QUIRK_BROKEN_DMA,
309 };
310
311 static const struct sdhci_pci_fixes sdhci_cafe = {
312 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
313 SDHCI_QUIRK_NO_BUSY_IRQ |
314 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
315 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
316 };
317
318 static const struct sdhci_pci_fixes sdhci_intel_qrk = {
319 .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
320 };
321
static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
323 {
324 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
325 return 0;
326 }
327
328 /*
329 * ADMA operation is disabled for Moorestown platform due to
330 * hardware bugs.
331 */
static int mrst_hc_probe(struct sdhci_pci_chip *chip)
333 {
	/*
	 * The number of slots is fixed here for MRST as SDIO3/5 are never
	 * used and have hardware bugs.
	 */
338 chip->num_slots = 1;
339 return 0;
340 }
341
static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
343 {
344 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
345 return 0;
346 }
347
348 #ifdef CONFIG_PM
349
static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id)
351 {
352 struct sdhci_pci_slot *slot = dev_id;
353 struct sdhci_host *host = slot->host;
354
355 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
356 return IRQ_HANDLED;
357 }
358
static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
360 {
361 int err, irq, gpio = slot->cd_gpio;
362
363 slot->cd_gpio = -EINVAL;
364 slot->cd_irq = -EINVAL;
365
366 if (!gpio_is_valid(gpio))
367 return;
368
369 err = devm_gpio_request(&slot->chip->pdev->dev, gpio, "sd_cd");
370 if (err < 0)
371 goto out;
372
373 err = gpio_direction_input(gpio);
374 if (err < 0)
375 goto out_free;
376
377 irq = gpio_to_irq(gpio);
378 if (irq < 0)
379 goto out_free;
380
381 err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING |
382 IRQF_TRIGGER_FALLING, "sd_cd", slot);
383 if (err)
384 goto out_free;
385
386 slot->cd_gpio = gpio;
387 slot->cd_irq = irq;
388
389 return;
390
391 out_free:
392 devm_gpio_free(&slot->chip->pdev->dev, gpio);
393 out:
394 dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
395 }
396
static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
398 {
399 if (slot->cd_irq >= 0)
400 free_irq(slot->cd_irq, slot);
401 }
402
403 #else
404
static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
406 {
407 }
408
static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
410 {
411 }
412
413 #endif
414
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
416 {
417 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
418 slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
419 return 0;
420 }
421
static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
423 {
424 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
425 return 0;
426 }
427
428 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
429 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
430 .probe_slot = mrst_hc_probe_slot,
431 };
432
433 static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
434 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
435 .probe = mrst_hc_probe,
436 };
437
438 static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
439 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
440 .allow_runtime_pm = true,
441 .own_cd_for_runtime_pm = true,
442 };
443
444 static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
445 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
446 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
447 .allow_runtime_pm = true,
448 .probe_slot = mfd_sdio_probe_slot,
449 };
450
451 static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
452 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
453 .allow_runtime_pm = true,
454 .probe_slot = mfd_emmc_probe_slot,
455 };
456
457 static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
458 .quirks = SDHCI_QUIRK_BROKEN_ADMA,
459 .probe_slot = pch_hc_probe_slot,
460 };
461
462 #ifdef CONFIG_X86
463
464 #define BYT_IOSF_SCCEP 0x63
465 #define BYT_IOSF_OCP_NETCTRL0 0x1078
466 #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
467
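/*
 * Clear the OCP timeout base field in OCP_NETCTRL0 via the IOSF sideband
 * interface on Bay Trail SDHCI devices.
 */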
static void byt_ocp_setting(struct pci_dev *pdev)
469 {
470 u32 val = 0;
471
472 if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
473 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
474 pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
475 pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
476 return;
477
478 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
479 &val)) {
480 dev_err(&pdev->dev, "%s read error\n", __func__);
481 return;
482 }
483
484 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
485 return;
486
487 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
488
489 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
490 val)) {
491 dev_err(&pdev->dev, "%s write error\n", __func__);
492 return;
493 }
494
495 dev_dbg(&pdev->dev, "%s completed\n", __func__);
496 }
497
498 #else
499
static inline void byt_ocp_setting(struct pci_dev *pdev)
501 {
502 }
503
504 #endif
505
506 enum {
507 INTEL_DSM_FNS = 0,
508 INTEL_DSM_V18_SWITCH = 3,
509 INTEL_DSM_V33_SWITCH = 4,
510 INTEL_DSM_DRV_STRENGTH = 9,
511 INTEL_DSM_D3_RETUNE = 10,
512 };
513
514 struct intel_host {
515 u32 dsm_fns;
516 int drv_strength;
517 bool d3_retune;
518 bool rpm_retune_ok;
519 bool needs_pwr_off;
520 u32 glk_rx_ctrl1;
521 u32 glk_tun_val;
522 u32 active_ltr;
523 u32 idle_ltr;
524 };
525
526 static const guid_t intel_dsm_guid =
527 GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
528 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
529
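/* Evaluate the Intel SDHCI _DSM and copy up to 4 bytes of the result buffer */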
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
		       unsigned int fn, u32 *result)
532 {
533 union acpi_object *obj;
534 int err = 0;
535 size_t len;
536
537 obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
538 if (!obj)
539 return -EOPNOTSUPP;
540
541 if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
542 err = -EINVAL;
543 goto out;
544 }
545
546 len = min_t(size_t, obj->buffer.length, 4);
547
548 *result = 0;
549 memcpy(result, obj->buffer.pointer, len);
550 out:
551 ACPI_FREE(obj);
552
553 return err;
554 }
555
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
		     unsigned int fn, u32 *result)
558 {
559 if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
560 return -EOPNOTSUPP;
561
562 return __intel_dsm(intel_host, dev, fn, result);
563 }
564
static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
			   struct mmc_host *mmc)
567 {
568 int err;
569 u32 val;
570
571 intel_host->d3_retune = true;
572
573 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
574 if (err) {
575 pr_debug("%s: DSM not supported, error %d\n",
576 mmc_hostname(mmc), err);
577 return;
578 }
579
580 pr_debug("%s: DSM function mask %#x\n",
581 mmc_hostname(mmc), intel_host->dsm_fns);
582
583 err = intel_dsm(intel_host, dev, INTEL_DSM_DRV_STRENGTH, &val);
584 intel_host->drv_strength = err ? 0 : val;
585
586 err = intel_dsm(intel_host, dev, INTEL_DSM_D3_RETUNE, &val);
587 intel_host->d3_retune = err ? true : !!val;
588 }
589
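/* Toggle bit 4 of the power control register to hardware-reset the eMMC card */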
static void sdhci_pci_int_hw_reset(struct sdhci_host *host)
591 {
592 u8 reg;
593
594 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
595 reg |= 0x10;
596 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
597 /* For eMMC, minimum is 1us but give it 9us for good measure */
598 udelay(9);
599 reg &= ~0x10;
600 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
601 /* For eMMC, minimum is 200us but give it 300us for good measure */
602 usleep_range(300, 1000);
603 }
604
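/*
 * Use the drive strength reported by the Intel DSM if the card supports it,
 * otherwise fall back to the default (type 0).
 */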
static int intel_select_drive_strength(struct mmc_card *card,
				       unsigned int max_dtr, int host_drv,
				       int card_drv, int *drv_type)
608 {
609 struct sdhci_host *host = mmc_priv(card->host);
610 struct sdhci_pci_slot *slot = sdhci_priv(host);
611 struct intel_host *intel_host = sdhci_pci_priv(slot);
612
613 if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
614 return 0;
615
616 return intel_host->drv_strength;
617 }
618
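/*
 * Report no card when the card-detect GPIO says so; otherwise fall back to
 * the controller's present state register.
 */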
static int bxt_get_cd(struct mmc_host *mmc)
620 {
621 int gpio_cd = mmc_gpio_get_cd(mmc);
622 struct sdhci_host *host = mmc_priv(mmc);
623 unsigned long flags;
624 int ret = 0;
625
626 if (!gpio_cd)
627 return 0;
628
629 spin_lock_irqsave(&host->lock, flags);
630
631 if (host->flags & SDHCI_DEVICE_DEAD)
632 goto out;
633
634 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
635 out:
636 spin_unlock_irqrestore(&host->lock, flags);
637
638 return ret;
639 }
640
641 #define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
642 #define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
643
static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
				  unsigned short vdd)
646 {
647 struct sdhci_pci_slot *slot = sdhci_priv(host);
648 struct intel_host *intel_host = sdhci_pci_priv(slot);
649 int cntr;
650 u8 reg;
651
652 /*
653 * Bus power may control card power, but a full reset still may not
654 * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
655 * That might be needed to initialize correctly, if the card was left
656 * powered on previously.
657 */
658 if (intel_host->needs_pwr_off) {
659 intel_host->needs_pwr_off = false;
660 if (mode != MMC_POWER_OFF) {
661 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
662 usleep_range(10000, 12500);
663 }
664 }
665
666 sdhci_set_power(host, mode, vdd);
667
668 if (mode == MMC_POWER_OFF)
669 return;
670
671 /*
672 * Bus power might not enable after D3 -> D0 transition due to the
673 * present state not yet having propagated. Retry for up to 2ms.
674 */
675 for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
676 reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
677 if (reg & SDHCI_POWER_ON)
678 break;
679 udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
680 reg |= SDHCI_POWER_ON;
681 sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
682 }
683 }
684
static void sdhci_intel_set_uhs_signaling(struct sdhci_host *host,
					  unsigned int timing)
687 {
688 /* Set UHS timing to SDR25 for High Speed mode */
689 if (timing == MMC_TIMING_MMC_HS || timing == MMC_TIMING_SD_HS)
690 timing = MMC_TIMING_UHS_SDR25;
691 sdhci_set_uhs_signaling(host, timing);
692 }
693
694 #define INTEL_HS400_ES_REG 0x78
695 #define INTEL_HS400_ES_BIT BIT(0)
696
static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
					struct mmc_ios *ios)
699 {
700 struct sdhci_host *host = mmc_priv(mmc);
701 u32 val;
702
703 val = sdhci_readl(host, INTEL_HS400_ES_REG);
704 if (ios->enhanced_strobe)
705 val |= INTEL_HS400_ES_BIT;
706 else
707 val &= ~INTEL_HS400_ES_BIT;
708 sdhci_writel(host, val, INTEL_HS400_ES_REG);
709 }
710
static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
					     struct mmc_ios *ios)
713 {
714 struct device *dev = mmc_dev(mmc);
715 struct sdhci_host *host = mmc_priv(mmc);
716 struct sdhci_pci_slot *slot = sdhci_priv(host);
717 struct intel_host *intel_host = sdhci_pci_priv(slot);
718 unsigned int fn;
719 u32 result = 0;
720 int err;
721
722 err = sdhci_start_signal_voltage_switch(mmc, ios);
723 if (err)
724 return err;
725
726 switch (ios->signal_voltage) {
727 case MMC_SIGNAL_VOLTAGE_330:
728 fn = INTEL_DSM_V33_SWITCH;
729 break;
730 case MMC_SIGNAL_VOLTAGE_180:
731 fn = INTEL_DSM_V18_SWITCH;
732 break;
733 default:
734 return 0;
735 }
736
737 err = intel_dsm(intel_host, dev, fn, &result);
738 pr_debug("%s: %s DSM fn %u error %d result %u\n",
739 mmc_hostname(mmc), __func__, fn, err, result);
740
741 return 0;
742 }
743
744 static const struct sdhci_ops sdhci_intel_byt_ops = {
745 .set_clock = sdhci_set_clock,
746 .set_power = sdhci_intel_set_power,
747 .enable_dma = sdhci_pci_enable_dma,
748 .set_bus_width = sdhci_set_bus_width,
749 .reset = sdhci_reset,
750 .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
751 .hw_reset = sdhci_pci_hw_reset,
752 };
753
754 static const struct sdhci_ops sdhci_intel_glk_ops = {
755 .set_clock = sdhci_set_clock,
756 .set_power = sdhci_intel_set_power,
757 .enable_dma = sdhci_pci_enable_dma,
758 .set_bus_width = sdhci_set_bus_width,
759 .reset = sdhci_cqhci_reset,
760 .set_uhs_signaling = sdhci_intel_set_uhs_signaling,
761 .hw_reset = sdhci_pci_hw_reset,
762 .irq = sdhci_cqhci_irq,
763 };
764
static void byt_read_dsm(struct sdhci_pci_slot *slot)
766 {
767 struct intel_host *intel_host = sdhci_pci_priv(slot);
768 struct device *dev = &slot->chip->pdev->dev;
769 struct mmc_host *mmc = slot->host->mmc;
770
771 intel_dsm_init(intel_host, dev, mmc);
772 slot->chip->rpm_retune = intel_host->d3_retune;
773 }
774
static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
776 {
777 int err = sdhci_execute_tuning(mmc, opcode);
778 struct sdhci_host *host = mmc_priv(mmc);
779
780 if (err)
781 return err;
782
783 /*
784 * Tuning can leave the IP in an active state (Buffer Read Enable bit
785 * set) which prevents the entry to low power states (i.e. S0i3). Data
786 * reset will clear it.
787 */
788 sdhci_reset(host, SDHCI_RESET_DATA);
789
790 return 0;
791 }
792
793 #define INTEL_ACTIVELTR 0x804
794 #define INTEL_IDLELTR 0x808
795
796 #define INTEL_LTR_REQ BIT(15)
797 #define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
798 #define INTEL_LTR_SCALE_1US (2 << 10)
799 #define INTEL_LTR_SCALE_32US (3 << 10)
800 #define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
801
static void intel_cache_ltr(struct sdhci_pci_slot *slot)
803 {
804 struct intel_host *intel_host = sdhci_pci_priv(slot);
805 struct sdhci_host *host = slot->host;
806
807 intel_host->active_ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
808 intel_host->idle_ltr = readl(host->ioaddr + INTEL_IDLELTR);
809 }
810
static void intel_ltr_set(struct device *dev, s32 val)
812 {
813 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
814 struct sdhci_pci_slot *slot = chip->slots[0];
815 struct intel_host *intel_host = sdhci_pci_priv(slot);
816 struct sdhci_host *host = slot->host;
817 u32 ltr;
818
819 pm_runtime_get_sync(dev);
820
	/*
	 * Program the latency tolerance (LTR) according to what has been
	 * requested by the PM QoS layer, or disable it if we were passed a
	 * negative value or PM_QOS_LATENCY_ANY.
	 */
826 ltr = readl(host->ioaddr + INTEL_ACTIVELTR);
827
828 if (val == PM_QOS_LATENCY_ANY || val < 0) {
829 ltr &= ~INTEL_LTR_REQ;
830 } else {
831 ltr |= INTEL_LTR_REQ;
832 ltr &= ~INTEL_LTR_SCALE_MASK;
833 ltr &= ~INTEL_LTR_VALUE_MASK;
834
835 if (val > INTEL_LTR_VALUE_MASK) {
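			/* Value does not fit in 1 us units; convert to 32 us units */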
836 val >>= 5;
837 if (val > INTEL_LTR_VALUE_MASK)
838 val = INTEL_LTR_VALUE_MASK;
839 ltr |= INTEL_LTR_SCALE_32US | val;
840 } else {
841 ltr |= INTEL_LTR_SCALE_1US | val;
842 }
843 }
844
845 if (ltr == intel_host->active_ltr)
846 goto out;
847
848 writel(ltr, host->ioaddr + INTEL_ACTIVELTR);
849 writel(ltr, host->ioaddr + INTEL_IDLELTR);
850
	/* Cache the values into the intel_host structure */
852 intel_cache_ltr(slot);
853 out:
854 pm_runtime_put_autosuspend(dev);
855 }
856
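/* Bay Trail and Braswell controllers do not use the LTR mechanism */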
static bool intel_use_ltr(struct sdhci_pci_chip *chip)
858 {
859 switch (chip->pdev->device) {
860 case PCI_DEVICE_ID_INTEL_BYT_EMMC:
861 case PCI_DEVICE_ID_INTEL_BYT_EMMC2:
862 case PCI_DEVICE_ID_INTEL_BYT_SDIO:
863 case PCI_DEVICE_ID_INTEL_BYT_SD:
864 case PCI_DEVICE_ID_INTEL_BSW_EMMC:
865 case PCI_DEVICE_ID_INTEL_BSW_SDIO:
866 case PCI_DEVICE_ID_INTEL_BSW_SD:
867 return false;
868 default:
869 return true;
870 }
871 }
872
static void intel_ltr_expose(struct sdhci_pci_chip *chip)
874 {
875 struct device *dev = &chip->pdev->dev;
876
877 if (!intel_use_ltr(chip))
878 return;
879
880 dev->power.set_latency_tolerance = intel_ltr_set;
881 dev_pm_qos_expose_latency_tolerance(dev);
882 }
883
static void intel_ltr_hide(struct sdhci_pci_chip *chip)
885 {
886 struct device *dev = &chip->pdev->dev;
887
888 if (!intel_use_ltr(chip))
889 return;
890
891 dev_pm_qos_hide_latency_tolerance(dev);
892 dev->power.set_latency_tolerance = NULL;
893 }
894
static void byt_probe_slot(struct sdhci_pci_slot *slot)
896 {
897 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
898 struct device *dev = &slot->chip->pdev->dev;
899 struct mmc_host *mmc = slot->host->mmc;
900
901 byt_read_dsm(slot);
902
903 byt_ocp_setting(slot->chip->pdev);
904
905 ops->execute_tuning = intel_execute_tuning;
906 ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
907
908 device_property_read_u32(dev, "max-frequency", &mmc->f_max);
909
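	/* The first slot owns the chip-wide LTR / PM QoS handling */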
910 if (!mmc->slotno) {
911 slot->chip->slots[mmc->slotno] = slot;
912 intel_ltr_expose(slot->chip);
913 }
914 }
915
static void byt_add_debugfs(struct sdhci_pci_slot *slot)
917 {
918 struct intel_host *intel_host = sdhci_pci_priv(slot);
919 struct mmc_host *mmc = slot->host->mmc;
920 struct dentry *dir = mmc->debugfs_root;
921
922 if (!intel_use_ltr(slot->chip))
923 return;
924
925 debugfs_create_x32("active_ltr", 0444, dir, &intel_host->active_ltr);
926 debugfs_create_x32("idle_ltr", 0444, dir, &intel_host->idle_ltr);
927
928 intel_cache_ltr(slot);
929 }
930
static int byt_add_host(struct sdhci_pci_slot *slot)
932 {
933 int ret = sdhci_add_host(slot->host);
934
935 if (!ret)
936 byt_add_debugfs(slot);
937 return ret;
938 }
939
static void byt_remove_slot(struct sdhci_pci_slot *slot, int dead)
941 {
942 struct mmc_host *mmc = slot->host->mmc;
943
944 if (!mmc->slotno)
945 intel_ltr_hide(slot->chip);
946 }
947
static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
949 {
950 byt_probe_slot(slot);
951 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
952 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
953 MMC_CAP_CMD_DURING_TFR |
954 MMC_CAP_WAIT_WHILE_BUSY;
955 slot->hw_reset = sdhci_pci_int_hw_reset;
956 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BSW_EMMC)
957 slot->host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
958 slot->host->mmc_host_ops.select_drive_strength =
959 intel_select_drive_strength;
960 return 0;
961 }
962
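/* Platforms where GLK eMMC command queuing is known to be broken (DMI match) */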
static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
964 {
965 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
966 (dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
967 dmi_match(DMI_SYS_VENDOR, "IRBIS"));
968 }
969
static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
971 {
972 return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC &&
973 dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC.");
974 }
975
static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
977 {
978 int ret = byt_emmc_probe_slot(slot);
979
980 if (!glk_broken_cqhci(slot))
981 slot->host->mmc->caps2 |= MMC_CAP2_CQE;
982
983 if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
984 if (!jsl_broken_hs400es(slot)) {
985 slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES;
986 slot->host->mmc_host_ops.hs400_enhanced_strobe =
987 intel_hs400_enhanced_strobe;
988 }
989 slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
990 }
991
992 return ret;
993 }
994
995 static const struct cqhci_host_ops glk_cqhci_ops = {
996 .enable = sdhci_cqe_enable,
997 .disable = sdhci_cqe_disable,
998 .dumpregs = sdhci_pci_dumpregs,
999 };
1000
static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
1002 {
1003 struct device *dev = &slot->chip->pdev->dev;
1004 struct sdhci_host *host = slot->host;
1005 struct cqhci_host *cq_host;
1006 bool dma64;
1007 int ret;
1008
1009 ret = sdhci_setup_host(host);
1010 if (ret)
1011 return ret;
1012
1013 cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
1014 if (!cq_host) {
1015 ret = -ENOMEM;
1016 goto cleanup;
1017 }
1018
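	/* CQE registers live at offset 0x200 from the SDHCI register base */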
1019 cq_host->mmio = host->ioaddr + 0x200;
1020 cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
1021 cq_host->ops = &glk_cqhci_ops;
1022
1023 dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1024 if (dma64)
1025 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1026
1027 ret = cqhci_init(cq_host, host->mmc, dma64);
1028 if (ret)
1029 goto cleanup;
1030
1031 ret = __sdhci_add_host(host);
1032 if (ret)
1033 goto cleanup;
1034
1035 byt_add_debugfs(slot);
1036
1037 return 0;
1038
1039 cleanup:
1040 sdhci_cleanup_host(host);
1041 return ret;
1042 }
1043
1044 #ifdef CONFIG_PM
1045 #define GLK_RX_CTRL1 0x834
1046 #define GLK_TUN_VAL 0x840
1047 #define GLK_PATH_PLL GENMASK(13, 8)
1048 #define GLK_DLY GENMASK(6, 0)
1049 /* Workaround firmware failing to restore the tuning value */
static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
1051 {
1052 struct sdhci_pci_slot *slot = chip->slots[0];
1053 struct intel_host *intel_host = sdhci_pci_priv(slot);
1054 struct sdhci_host *host = slot->host;
1055 u32 glk_rx_ctrl1;
1056 u32 glk_tun_val;
1057 u32 dly;
1058
1059 if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
1060 return;
1061
1062 glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
1063 glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
1064
1065 if (susp) {
1066 intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
1067 intel_host->glk_tun_val = glk_tun_val;
1068 return;
1069 }
1070
1071 if (!intel_host->glk_tun_val)
1072 return;
1073
1074 if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
1075 intel_host->rpm_retune_ok = true;
1076 return;
1077 }
1078
1079 dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
1080 (intel_host->glk_tun_val << 1));
1081 if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
1082 return;
1083
1084 glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
1085 sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
1086
1087 intel_host->rpm_retune_ok = true;
1088 chip->rpm_retune = true;
1089 mmc_retune_needed(host->mmc);
1090 pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc));
1091 }
1092
static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
1094 {
1095 if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
1096 !chip->rpm_retune)
1097 glk_rpm_retune_wa(chip, susp);
1098 }
1099
static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
1101 {
1102 glk_rpm_retune_chk(chip, true);
1103
1104 return sdhci_cqhci_runtime_suspend(chip);
1105 }
1106
static int glk_runtime_resume(struct sdhci_pci_chip *chip)
1108 {
1109 glk_rpm_retune_chk(chip, false);
1110
1111 return sdhci_cqhci_runtime_resume(chip);
1112 }
1113 #endif
1114
1115 #ifdef CONFIG_ACPI
static int ni_set_max_freq(struct sdhci_pci_slot *slot)
1117 {
1118 acpi_status status;
1119 unsigned long long max_freq;
1120
1121 status = acpi_evaluate_integer(ACPI_HANDLE(&slot->chip->pdev->dev),
1122 "MXFQ", NULL, &max_freq);
1123 if (ACPI_FAILURE(status)) {
1124 dev_err(&slot->chip->pdev->dev,
1125 "MXFQ not found in acpi table\n");
1126 return -EINVAL;
1127 }
1128
1129 slot->host->mmc->f_max = max_freq * 1000000;
1130
1131 return 0;
1132 }
1133 #else
static inline int ni_set_max_freq(struct sdhci_pci_slot *slot)
1135 {
1136 return 0;
1137 }
1138 #endif
1139
static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1141 {
1142 int err;
1143
1144 byt_probe_slot(slot);
1145
1146 err = ni_set_max_freq(slot);
1147 if (err)
1148 return err;
1149
1150 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1151 MMC_CAP_WAIT_WHILE_BUSY;
1152 return 0;
1153 }
1154
static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
1156 {
1157 byt_probe_slot(slot);
1158 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
1159 MMC_CAP_WAIT_WHILE_BUSY;
1160 return 0;
1161 }
1162
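/*
 * Note whether bus power was left on, so that sdhci_intel_set_power() can
 * power the card off first before the next power-up.
 */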
static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
1164 {
1165 struct intel_host *intel_host = sdhci_pci_priv(slot);
1166 u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
1167
1168 intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
1169 }
1170
static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
1172 {
1173 byt_probe_slot(slot);
1174 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
1175 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
1176 slot->cd_idx = 0;
1177 slot->cd_override_level = true;
1178 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
1179 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
1180 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
1181 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_SD)
1182 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
1183
1184 if (slot->chip->pdev->subsystem_vendor == PCI_VENDOR_ID_NI &&
1185 slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
1186 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
1187
1188 byt_needs_pwr_off(slot);
1189
1190 return 0;
1191 }
1192
1193 #ifdef CONFIG_PM_SLEEP
1194
static int byt_resume(struct sdhci_pci_chip *chip)
1196 {
1197 byt_ocp_setting(chip->pdev);
1198
1199 return sdhci_pci_resume_host(chip);
1200 }
1201
1202 #endif
1203
1204 #ifdef CONFIG_PM
1205
static int byt_runtime_resume(struct sdhci_pci_chip *chip)
1207 {
1208 byt_ocp_setting(chip->pdev);
1209
1210 return sdhci_pci_runtime_resume_host(chip);
1211 }
1212
1213 #endif
1214
1215 static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
1216 #ifdef CONFIG_PM_SLEEP
1217 .resume = byt_resume,
1218 #endif
1219 #ifdef CONFIG_PM
1220 .runtime_resume = byt_runtime_resume,
1221 #endif
1222 .allow_runtime_pm = true,
1223 .probe_slot = byt_emmc_probe_slot,
1224 .add_host = byt_add_host,
1225 .remove_slot = byt_remove_slot,
1226 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1227 SDHCI_QUIRK_NO_LED,
1228 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1229 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1230 SDHCI_QUIRK2_STOP_WITH_TC,
1231 .ops = &sdhci_intel_byt_ops,
1232 .priv_size = sizeof(struct intel_host),
1233 };
1234
1235 static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
1236 .allow_runtime_pm = true,
1237 .probe_slot = glk_emmc_probe_slot,
1238 .add_host = glk_emmc_add_host,
1239 .remove_slot = byt_remove_slot,
1240 #ifdef CONFIG_PM_SLEEP
1241 .suspend = sdhci_cqhci_suspend,
1242 .resume = sdhci_cqhci_resume,
1243 #endif
1244 #ifdef CONFIG_PM
1245 .runtime_suspend = glk_runtime_suspend,
1246 .runtime_resume = glk_runtime_resume,
1247 #endif
1248 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1249 SDHCI_QUIRK_NO_LED,
1250 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1251 SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
1252 SDHCI_QUIRK2_STOP_WITH_TC,
1253 .ops = &sdhci_intel_glk_ops,
1254 .priv_size = sizeof(struct intel_host),
1255 };
1256
1257 static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
1258 #ifdef CONFIG_PM_SLEEP
1259 .resume = byt_resume,
1260 #endif
1261 #ifdef CONFIG_PM
1262 .runtime_resume = byt_runtime_resume,
1263 #endif
1264 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1265 SDHCI_QUIRK_NO_LED,
1266 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1267 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1268 .allow_runtime_pm = true,
1269 .probe_slot = ni_byt_sdio_probe_slot,
1270 .add_host = byt_add_host,
1271 .remove_slot = byt_remove_slot,
1272 .ops = &sdhci_intel_byt_ops,
1273 .priv_size = sizeof(struct intel_host),
1274 };
1275
1276 static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
1277 #ifdef CONFIG_PM_SLEEP
1278 .resume = byt_resume,
1279 #endif
1280 #ifdef CONFIG_PM
1281 .runtime_resume = byt_runtime_resume,
1282 #endif
1283 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1284 SDHCI_QUIRK_NO_LED,
1285 .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
1286 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1287 .allow_runtime_pm = true,
1288 .probe_slot = byt_sdio_probe_slot,
1289 .add_host = byt_add_host,
1290 .remove_slot = byt_remove_slot,
1291 .ops = &sdhci_intel_byt_ops,
1292 .priv_size = sizeof(struct intel_host),
1293 };
1294
1295 static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
1296 #ifdef CONFIG_PM_SLEEP
1297 .resume = byt_resume,
1298 #endif
1299 #ifdef CONFIG_PM
1300 .runtime_resume = byt_runtime_resume,
1301 #endif
1302 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
1303 SDHCI_QUIRK_NO_LED,
1304 .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
1305 SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1306 SDHCI_QUIRK2_STOP_WITH_TC,
1307 .allow_runtime_pm = true,
1308 .own_cd_for_runtime_pm = true,
1309 .probe_slot = byt_sd_probe_slot,
1310 .add_host = byt_add_host,
1311 .remove_slot = byt_remove_slot,
1312 .ops = &sdhci_intel_byt_ops,
1313 .priv_size = sizeof(struct intel_host),
1314 };
1315
1316 /* Define Host controllers for Intel Merrifield platform */
1317 #define INTEL_MRFLD_EMMC_0 0
1318 #define INTEL_MRFLD_EMMC_1 1
1319 #define INTEL_MRFLD_SD 2
1320 #define INTEL_MRFLD_SDIO 3
1321
1322 #ifdef CONFIG_ACPI
static void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot)
1324 {
1325 struct acpi_device *device, *child;
1326
1327 device = ACPI_COMPANION(&slot->chip->pdev->dev);
1328 if (!device)
1329 return;
1330
1331 acpi_device_fix_up_power(device);
1332 list_for_each_entry(child, &device->children, node)
1333 if (child->status.present && child->status.enabled)
1334 acpi_device_fix_up_power(child);
1335 }
1336 #else
static inline void intel_mrfld_mmc_fix_up_power_slot(struct sdhci_pci_slot *slot) {}
1338 #endif
1339
static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
1341 {
1342 unsigned int func = PCI_FUNC(slot->chip->pdev->devfn);
1343
1344 switch (func) {
1345 case INTEL_MRFLD_EMMC_0:
1346 case INTEL_MRFLD_EMMC_1:
1347 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1348 MMC_CAP_8_BIT_DATA |
1349 MMC_CAP_1_8V_DDR;
1350 break;
1351 case INTEL_MRFLD_SD:
1352 slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1353 break;
1354 case INTEL_MRFLD_SDIO:
1355 /* Advertise 2.0v for compatibility with the SDIO card's OCR */
1356 slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
1357 slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
1358 MMC_CAP_POWER_OFF_CARD;
1359 break;
1360 default:
1361 return -ENODEV;
1362 }
1363
1364 intel_mrfld_mmc_fix_up_power_slot(slot);
1365 return 0;
1366 }
1367
1368 static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
1369 .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
1370 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
1371 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1372 .allow_runtime_pm = true,
1373 .probe_slot = intel_mrfld_mmc_probe_slot,
1374 };
1375
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
1377 {
1378 u8 scratch;
1379 int ret;
1380
1381 ret = pci_read_config_byte(chip->pdev, 0xAE, &scratch);
1382 if (ret)
1383 return ret;
1384
1385 /*
1386 * Turn PMOS on [bit 0], set over current detection to 2.4 V
1387 * [bit 1:2] and enable over current debouncing [bit 6].
1388 */
1389 if (on)
1390 scratch |= 0x47;
1391 else
1392 scratch &= ~0x47;
1393
1394 return pci_write_config_byte(chip->pdev, 0xAE, scratch);
1395 }
1396
static int jmicron_probe(struct sdhci_pci_chip *chip)
1398 {
1399 int ret;
1400 u16 mmcdev = 0;
1401
1402 if (chip->pdev->revision == 0) {
1403 chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
1404 SDHCI_QUIRK_32BIT_DMA_SIZE |
1405 SDHCI_QUIRK_32BIT_ADMA_SIZE |
1406 SDHCI_QUIRK_RESET_AFTER_REQUEST |
1407 SDHCI_QUIRK_BROKEN_SMALL_PIO;
1408 }
1409
1410 /*
1411 * JMicron chips can have two interfaces to the same hardware
1412 * in order to work around limitations in Microsoft's driver.
1413 * We need to make sure we only bind to one of them.
1414 *
1415 * This code assumes two things:
1416 *
1417 * 1. The PCI code adds subfunctions in order.
1418 *
1419 * 2. The MMC interface has a lower subfunction number
1420 * than the SD interface.
1421 */
1422 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
1423 mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
1424 else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
1425 mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
1426
1427 if (mmcdev) {
1428 struct pci_dev *sd_dev;
1429
1430 sd_dev = NULL;
1431 while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
1432 mmcdev, sd_dev)) != NULL) {
1433 if ((PCI_SLOT(chip->pdev->devfn) ==
1434 PCI_SLOT(sd_dev->devfn)) &&
1435 (chip->pdev->bus == sd_dev->bus))
1436 break;
1437 }
1438
1439 if (sd_dev) {
1440 pci_dev_put(sd_dev);
1441 dev_info(&chip->pdev->dev, "Refusing to bind to "
1442 "secondary interface.\n");
1443 return -ENODEV;
1444 }
1445 }
1446
1447 /*
1448 * JMicron chips need a bit of a nudge to enable the power
1449 * output pins.
1450 */
1451 ret = jmicron_pmos(chip, 1);
1452 if (ret) {
1453 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1454 return ret;
1455 }
1456
	/* Quirk for unstable RO detection on JM388 chips */
1458 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD ||
1459 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1460 chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT;
1461
1462 return 0;
1463 }
1464
static void jmicron_enable_mmc(struct sdhci_host *host, int on)
1466 {
1467 u8 scratch;
1468
1469 scratch = readb(host->ioaddr + 0xC0);
1470
1471 if (on)
1472 scratch |= 0x01;
1473 else
1474 scratch &= ~0x01;
1475
1476 writeb(scratch, host->ioaddr + 0xC0);
1477 }
1478
static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
1480 {
1481 if (slot->chip->pdev->revision == 0) {
1482 u16 version;
1483
1484 version = readl(slot->host->ioaddr + SDHCI_HOST_VERSION);
1485 version = (version & SDHCI_VENDOR_VER_MASK) >>
1486 SDHCI_VENDOR_VER_SHIFT;
1487
1488 /*
1489 * Older versions of the chip have lots of nasty glitches
1490 * in the ADMA engine. It's best just to avoid it
1491 * completely.
1492 */
1493 if (version < 0xAC)
1494 slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1495 }
1496
1497 /* JM388 MMC doesn't support 1.8V while SD supports it */
1498 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1499 slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
1500 MMC_VDD_29_30 | MMC_VDD_30_31 |
1501 MMC_VDD_165_195; /* allow 1.8V */
1502 slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
1503 MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
1504 }
1505
1506 /*
1507 * The secondary interface requires a bit set to get the
1508 * interrupts.
1509 */
1510 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1511 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1512 jmicron_enable_mmc(slot->host, 1);
1513
1514 slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
1515
1516 return 0;
1517 }
1518
static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
1520 {
1521 if (dead)
1522 return;
1523
1524 if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1525 slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
1526 jmicron_enable_mmc(slot->host, 0);
1527 }
1528
1529 #ifdef CONFIG_PM_SLEEP
static int jmicron_suspend(struct sdhci_pci_chip *chip)
1531 {
1532 int i, ret;
1533
1534 ret = sdhci_pci_suspend_host(chip);
1535 if (ret)
1536 return ret;
1537
1538 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1539 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1540 for (i = 0; i < chip->num_slots; i++)
1541 jmicron_enable_mmc(chip->slots[i]->host, 0);
1542 }
1543
1544 return 0;
1545 }
1546
static int jmicron_resume(struct sdhci_pci_chip *chip)
1548 {
1549 int ret, i;
1550
1551 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
1552 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
1553 for (i = 0; i < chip->num_slots; i++)
1554 jmicron_enable_mmc(chip->slots[i]->host, 1);
1555 }
1556
1557 ret = jmicron_pmos(chip, 1);
1558 if (ret) {
1559 dev_err(&chip->pdev->dev, "Failure enabling card power\n");
1560 return ret;
1561 }
1562
1563 return sdhci_pci_resume_host(chip);
1564 }
1565 #endif
1566
1567 static const struct sdhci_pci_fixes sdhci_jmicron = {
1568 .probe = jmicron_probe,
1569
1570 .probe_slot = jmicron_probe_slot,
1571 .remove_slot = jmicron_remove_slot,
1572
1573 #ifdef CONFIG_PM_SLEEP
1574 .suspend = jmicron_suspend,
1575 .resume = jmicron_resume,
1576 #endif
1577 };
1578
1579 /* SysKonnect CardBus2SDIO extra registers */
1580 #define SYSKT_CTRL 0x200
1581 #define SYSKT_RDFIFO_STAT 0x204
1582 #define SYSKT_WRFIFO_STAT 0x208
1583 #define SYSKT_POWER_DATA 0x20c
1584 #define SYSKT_POWER_330 0xef
1585 #define SYSKT_POWER_300 0xf8
1586 #define SYSKT_POWER_184 0xcc
1587 #define SYSKT_POWER_CMD 0x20d
1588 #define SYSKT_POWER_START (1 << 7)
1589 #define SYSKT_POWER_STATUS 0x20e
1590 #define SYSKT_POWER_STATUS_OK (1 << 0)
1591 #define SYSKT_BOARD_REV 0x210
1592 #define SYSKT_CHIP_REV 0x211
1593 #define SYSKT_CONF_DATA 0x212
1594 #define SYSKT_CONF_DATA_1V8 (1 << 2)
1595 #define SYSKT_CONF_DATA_2V5 (1 << 1)
1596 #define SYSKT_CONF_DATA_3V3 (1 << 0)
1597
static int syskt_probe(struct sdhci_pci_chip *chip)
1599 {
1600 if ((chip->pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
1601 chip->pdev->class &= ~0x0000FF;
1602 chip->pdev->class |= PCI_SDHCI_IFDMA;
1603 }
1604 return 0;
1605 }
1606
static int syskt_probe_slot(struct sdhci_pci_slot *slot)
1608 {
1609 int tm, ps;
1610
1611 u8 board_rev = readb(slot->host->ioaddr + SYSKT_BOARD_REV);
1612 u8 chip_rev = readb(slot->host->ioaddr + SYSKT_CHIP_REV);
1613 dev_info(&slot->chip->pdev->dev, "SysKonnect CardBus2SDIO, "
1614 "board rev %d.%d, chip rev %d.%d\n",
1615 board_rev >> 4, board_rev & 0xf,
1616 chip_rev >> 4, chip_rev & 0xf);
1617 if (chip_rev >= 0x20)
1618 slot->host->quirks |= SDHCI_QUIRK_FORCE_DMA;
1619
1620 writeb(SYSKT_POWER_330, slot->host->ioaddr + SYSKT_POWER_DATA);
1621 writeb(SYSKT_POWER_START, slot->host->ioaddr + SYSKT_POWER_CMD);
1622 udelay(50);
1623 tm = 10; /* Wait max 1 ms */
1624 do {
1625 ps = readw(slot->host->ioaddr + SYSKT_POWER_STATUS);
1626 if (ps & SYSKT_POWER_STATUS_OK)
1627 break;
1628 udelay(100);
1629 } while (--tm);
1630 if (!tm) {
1631 dev_err(&slot->chip->pdev->dev,
1632 "power regulator never stabilized");
1633 writeb(0, slot->host->ioaddr + SYSKT_POWER_CMD);
1634 return -ENODEV;
1635 }
1636
1637 return 0;
1638 }
1639
1640 static const struct sdhci_pci_fixes sdhci_syskt = {
1641 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER,
1642 .probe = syskt_probe,
1643 .probe_slot = syskt_probe_slot,
1644 };
1645
static int via_probe(struct sdhci_pci_chip *chip)
1647 {
1648 if (chip->pdev->revision == 0x10)
1649 chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
1650
1651 return 0;
1652 }
1653
1654 static const struct sdhci_pci_fixes sdhci_via = {
1655 .probe = via_probe,
1656 };
1657
static int rtsx_probe_slot(struct sdhci_pci_slot *slot)
1659 {
1660 slot->host->mmc->caps2 |= MMC_CAP2_HS200;
1661 return 0;
1662 }
1663
1664 static const struct sdhci_pci_fixes sdhci_rtsx = {
1665 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1666 SDHCI_QUIRK2_BROKEN_64_BIT_DMA |
1667 SDHCI_QUIRK2_BROKEN_DDR50,
1668 .probe_slot = rtsx_probe_slot,
1669 };
1670
/* AMD chipset generation */
1672 enum amd_chipset_gen {
1673 AMD_CHIPSET_BEFORE_ML,
1674 AMD_CHIPSET_CZ,
1675 AMD_CHIPSET_NL,
1676 AMD_CHIPSET_UNKNOWN,
1677 };
1678
1679 /* AMD registers */
1680 #define AMD_SD_AUTO_PATTERN 0xB8
1681 #define AMD_MSLEEP_DURATION 4
1682 #define AMD_SD_MISC_CONTROL 0xD0
1683 #define AMD_MAX_TUNE_VALUE 0x0B
1684 #define AMD_AUTO_TUNE_SEL 0x10800
1685 #define AMD_FIFO_PTR 0x30
1686 #define AMD_BIT_MASK 0x1F
1687
static void amd_tuning_reset(struct sdhci_host *host)
1689 {
1690 unsigned int val;
1691
1692 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1693 val |= SDHCI_CTRL_PRESET_VAL_ENABLE | SDHCI_CTRL_EXEC_TUNING;
1694 sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1695
1696 val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1697 val &= ~SDHCI_CTRL_EXEC_TUNING;
1698 sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
1699 }
1700
static void amd_config_tuning_phase(struct pci_dev *pdev, u8 phase)
1702 {
1703 unsigned int val;
1704
1705 pci_read_config_dword(pdev, AMD_SD_AUTO_PATTERN, &val);
1706 val &= ~AMD_BIT_MASK;
1707 val |= (AMD_AUTO_TUNE_SEL | (phase << 1));
1708 pci_write_config_dword(pdev, AMD_SD_AUTO_PATTERN, val);
1709 }
1710
static void amd_enable_manual_tuning(struct pci_dev *pdev)
1712 {
1713 unsigned int val;
1714
1715 pci_read_config_dword(pdev, AMD_SD_MISC_CONTROL, &val);
1716 val |= AMD_FIFO_PTR;
1717 pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
1718 }
1719
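/*
 * Manual HS200 tuning: step through the 12 tuning phases, track the longest
 * run of phases that pass mmc_send_tuning(), and select the middle of that
 * window.
 */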
static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
1721 {
1722 struct sdhci_pci_slot *slot = sdhci_priv(host);
1723 struct pci_dev *pdev = slot->chip->pdev;
1724 u8 valid_win = 0;
1725 u8 valid_win_max = 0;
1726 u8 valid_win_end = 0;
1727 u8 ctrl, tune_around;
1728
1729 amd_tuning_reset(host);
1730
1731 for (tune_around = 0; tune_around < 12; tune_around++) {
1732 amd_config_tuning_phase(pdev, tune_around);
1733
1734 if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1735 valid_win = 0;
1736 msleep(AMD_MSLEEP_DURATION);
1737 ctrl = SDHCI_RESET_CMD | SDHCI_RESET_DATA;
1738 sdhci_writeb(host, ctrl, SDHCI_SOFTWARE_RESET);
1739 } else if (++valid_win > valid_win_max) {
1740 valid_win_max = valid_win;
1741 valid_win_end = tune_around;
1742 }
1743 }
1744
1745 if (!valid_win_max) {
1746 dev_err(&pdev->dev, "no tuning point found\n");
1747 return -EIO;
1748 }
1749
1750 amd_config_tuning_phase(pdev, valid_win_end - valid_win_max / 2);
1751
1752 amd_enable_manual_tuning(pdev);
1753
1754 host->mmc->retune_period = 0;
1755
1756 return 0;
1757 }
1758
static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
1760 {
1761 struct sdhci_host *host = mmc_priv(mmc);
1762
1763 /* AMD requires custom HS200 tuning */
1764 if (host->timing == MMC_TIMING_MMC_HS200)
1765 return amd_execute_tuning_hs200(host, opcode);
1766
1767 /* Otherwise perform standard SDHCI tuning */
1768 return sdhci_execute_tuning(mmc, opcode);
1769 }
1770
static int amd_probe_slot(struct sdhci_pci_slot *slot)
1772 {
1773 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
1774
1775 ops->execute_tuning = amd_execute_tuning;
1776
1777 return 0;
1778 }
1779
static int amd_probe(struct sdhci_pci_chip *chip)
1781 {
1782 struct pci_dev *smbus_dev;
1783 enum amd_chipset_gen gen;
1784
1785 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1786 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
1787 if (smbus_dev) {
1788 gen = AMD_CHIPSET_BEFORE_ML;
1789 } else {
1790 smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
1791 PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
1792 if (smbus_dev) {
1793 if (smbus_dev->revision < 0x51)
1794 gen = AMD_CHIPSET_CZ;
1795 else
1796 gen = AMD_CHIPSET_NL;
1797 } else {
1798 gen = AMD_CHIPSET_UNKNOWN;
1799 }
1800 }
1801
1802 pci_dev_put(smbus_dev);
1803
1804 if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
1805 chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
1806
1807 return 0;
1808 }
1809
static u32 sdhci_read_present_state(struct sdhci_host *host)
1811 {
1812 return sdhci_readl(host, SDHCI_PRESENT_STATE);
1813 }
1814
static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
1816 {
1817 struct sdhci_pci_slot *slot = sdhci_priv(host);
1818 struct pci_dev *pdev = slot->chip->pdev;
1819 u32 present_state;
1820
1821 /*
1822 * SDHC 0x7906 requires a hard reset to clear all internal state.
1823 * Otherwise it can get into a bad state where the DATA lines are always
1824 * read as zeros.
1825 */
1826 if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
1827 pci_clear_master(pdev);
1828
1829 pci_save_state(pdev);
1830
1831 pci_set_power_state(pdev, PCI_D3cold);
1832 pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
1833 pdev->current_state);
1834 pci_set_power_state(pdev, PCI_D0);
1835
1836 pci_restore_state(pdev);
1837
1838 /*
1839 * SDHCI_RESET_ALL says the card detect logic should not be
1840 * reset, but since we need to reset the entire controller
1841 * we should wait until the card detect logic has stabilized.
1842 *
1843 * This normally takes about 40ms.
1844 */
1845 readx_poll_timeout(
1846 sdhci_read_present_state,
1847 host,
1848 present_state,
1849 present_state & SDHCI_CD_STABLE,
1850 10000,
1851 100000
1852 );
1853 }
1854
1855 return sdhci_reset(host, mask);
1856 }
1857
1858 static const struct sdhci_ops amd_sdhci_pci_ops = {
1859 .set_clock = sdhci_set_clock,
1860 .enable_dma = sdhci_pci_enable_dma,
1861 .set_bus_width = sdhci_set_bus_width,
1862 .reset = amd_sdhci_reset,
1863 .set_uhs_signaling = sdhci_set_uhs_signaling,
1864 };
1865
1866 static const struct sdhci_pci_fixes sdhci_amd = {
1867 .probe = amd_probe,
1868 .ops = &amd_sdhci_pci_ops,
1869 .probe_slot = amd_probe_slot,
1870 };
1871
1872 static const struct pci_device_id pci_ids[] = {
1873 SDHCI_PCI_DEVICE(RICOH, R5C822, ricoh),
1874 SDHCI_PCI_DEVICE(RICOH, R5C843, ricoh_mmc),
1875 SDHCI_PCI_DEVICE(RICOH, R5CE822, ricoh_mmc),
1876 SDHCI_PCI_DEVICE(RICOH, R5CE823, ricoh_mmc),
1877 SDHCI_PCI_DEVICE(ENE, CB712_SD, ene_712),
1878 SDHCI_PCI_DEVICE(ENE, CB712_SD_2, ene_712),
1879 SDHCI_PCI_DEVICE(ENE, CB714_SD, ene_714),
1880 SDHCI_PCI_DEVICE(ENE, CB714_SD_2, ene_714),
1881 SDHCI_PCI_DEVICE(MARVELL, 88ALP01_SD, cafe),
1882 SDHCI_PCI_DEVICE(JMICRON, JMB38X_SD, jmicron),
1883 SDHCI_PCI_DEVICE(JMICRON, JMB38X_MMC, jmicron),
1884 SDHCI_PCI_DEVICE(JMICRON, JMB388_SD, jmicron),
1885 SDHCI_PCI_DEVICE(JMICRON, JMB388_ESD, jmicron),
1886 SDHCI_PCI_DEVICE(SYSKONNECT, 8000, syskt),
1887 SDHCI_PCI_DEVICE(VIA, 95D0, via),
1888 SDHCI_PCI_DEVICE(REALTEK, 5250, rtsx),
1889 SDHCI_PCI_DEVICE(INTEL, QRK_SD, intel_qrk),
1890 SDHCI_PCI_DEVICE(INTEL, MRST_SD0, intel_mrst_hc0),
1891 SDHCI_PCI_DEVICE(INTEL, MRST_SD1, intel_mrst_hc1_hc2),
1892 SDHCI_PCI_DEVICE(INTEL, MRST_SD2, intel_mrst_hc1_hc2),
1893 SDHCI_PCI_DEVICE(INTEL, MFD_SD, intel_mfd_sd),
1894 SDHCI_PCI_DEVICE(INTEL, MFD_SDIO1, intel_mfd_sdio),
1895 SDHCI_PCI_DEVICE(INTEL, MFD_SDIO2, intel_mfd_sdio),
1896 SDHCI_PCI_DEVICE(INTEL, MFD_EMMC0, intel_mfd_emmc),
1897 SDHCI_PCI_DEVICE(INTEL, MFD_EMMC1, intel_mfd_emmc),
1898 SDHCI_PCI_DEVICE(INTEL, PCH_SDIO0, intel_pch_sdio),
1899 SDHCI_PCI_DEVICE(INTEL, PCH_SDIO1, intel_pch_sdio),
1900 SDHCI_PCI_DEVICE(INTEL, BYT_EMMC, intel_byt_emmc),
1901 SDHCI_PCI_SUBDEVICE(INTEL, BYT_SDIO, NI, 7884, ni_byt_sdio),
1902 SDHCI_PCI_DEVICE(INTEL, BYT_SDIO, intel_byt_sdio),
1903 SDHCI_PCI_DEVICE(INTEL, BYT_SD, intel_byt_sd),
1904 SDHCI_PCI_DEVICE(INTEL, BYT_EMMC2, intel_byt_emmc),
1905 SDHCI_PCI_DEVICE(INTEL, BSW_EMMC, intel_byt_emmc),
1906 SDHCI_PCI_DEVICE(INTEL, BSW_SDIO, intel_byt_sdio),
1907 SDHCI_PCI_DEVICE(INTEL, BSW_SD, intel_byt_sd),
1908 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO0, intel_mfd_sd),
1909 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO1, intel_mfd_sdio),
1910 SDHCI_PCI_DEVICE(INTEL, CLV_SDIO2, intel_mfd_sdio),
1911 SDHCI_PCI_DEVICE(INTEL, CLV_EMMC0, intel_mfd_emmc),
1912 SDHCI_PCI_DEVICE(INTEL, CLV_EMMC1, intel_mfd_emmc),
1913 SDHCI_PCI_DEVICE(INTEL, MRFLD_MMC, intel_mrfld_mmc),
1914 SDHCI_PCI_DEVICE(INTEL, SPT_EMMC, intel_byt_emmc),
1915 SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
1916 SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
1917 SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
1918 SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
1919 SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
1920 SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
1921 SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
1922 SDHCI_PCI_DEVICE(INTEL, BXTM_EMMC, intel_byt_emmc),
1923 SDHCI_PCI_DEVICE(INTEL, BXTM_SDIO, intel_byt_sdio),
1924 SDHCI_PCI_DEVICE(INTEL, BXTM_SD, intel_byt_sd),
1925 SDHCI_PCI_DEVICE(INTEL, APL_EMMC, intel_byt_emmc),
1926 SDHCI_PCI_DEVICE(INTEL, APL_SDIO, intel_byt_sdio),
1927 SDHCI_PCI_DEVICE(INTEL, APL_SD, intel_byt_sd),
1928 SDHCI_PCI_DEVICE(INTEL, GLK_EMMC, intel_glk_emmc),
1929 SDHCI_PCI_DEVICE(INTEL, GLK_SDIO, intel_byt_sdio),
1930 SDHCI_PCI_DEVICE(INTEL, GLK_SD, intel_byt_sd),
1931 SDHCI_PCI_DEVICE(INTEL, CNP_EMMC, intel_glk_emmc),
1932 SDHCI_PCI_DEVICE(INTEL, CNP_SD, intel_byt_sd),
1933 SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
1934 SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
1935 SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
1936 SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc),
1937 SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd),
1938 SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
1939 SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
1940 SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
1941 SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
1942 SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
1943 SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
1944 SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
1945 SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc),
1946 SDHCI_PCI_DEVICE(O2, 8120, o2),
1947 SDHCI_PCI_DEVICE(O2, 8220, o2),
1948 SDHCI_PCI_DEVICE(O2, 8221, o2),
1949 SDHCI_PCI_DEVICE(O2, 8320, o2),
1950 SDHCI_PCI_DEVICE(O2, 8321, o2),
1951 SDHCI_PCI_DEVICE(O2, FUJIN2, o2),
1952 SDHCI_PCI_DEVICE(O2, SDS0, o2),
1953 SDHCI_PCI_DEVICE(O2, SDS1, o2),
1954 SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
1955 SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
1956 SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
1957 SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
1958 SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
1959 SDHCI_PCI_DEVICE(GLI, 9755, gl9755),
1960 SDHCI_PCI_DEVICE(GLI, 9763E, gl9763e),
1961 SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
1962 /* Generic SD host controller */
1963 {PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
1964 { /* end: all zeroes */ },
1965 };
1966
1967 MODULE_DEVICE_TABLE(pci, pci_ids);
1968
1969 /*****************************************************************************\
1970 * *
1971 * SDHCI core callbacks *
1972 * *
1973 \*****************************************************************************/
1974
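/*
 * Warn when an SDHCI-class controller is about to use SDMA although the
 * programming interface in its class code does not advertise DMA support,
 * then enable PCI bus mastering so DMA can work at all.
 */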
1975 int sdhci_pci_enable_dma(struct sdhci_host *host)
1976 {
1977 struct sdhci_pci_slot *slot;
1978 struct pci_dev *pdev;
1979
1980 slot = sdhci_priv(host);
1981 pdev = slot->chip->pdev;
1982
1983 if (((pdev->class & 0xFFFF00) == (PCI_CLASS_SYSTEM_SDHCI << 8)) &&
1984 ((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
1985 (host->flags & SDHCI_USE_SDMA)) {
1986 dev_warn(&pdev->dev, "Will use DMA mode even though HW "
1987 "doesn't fully claim to support it.\n");
1988 }
1989
1990 pci_set_master(pdev);
1991
1992 return 0;
1993 }
1994
1995 static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
1996 {
1997 struct sdhci_pci_slot *slot = sdhci_priv(host);
1998 int rst_n_gpio = slot->rst_n_gpio;
1999
2000 if (!gpio_is_valid(rst_n_gpio))
2001 return;
2002 gpio_set_value_cansleep(rst_n_gpio, 0);
2003 /* For eMMC, minimum is 1us but give it 10us for good measure */
2004 udelay(10);
2005 gpio_set_value_cansleep(rst_n_gpio, 1);
2006 /* For eMMC, minimum is 200us but give it 300us for good measure */
2007 usleep_range(300, 1000);
2008 }
2009
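/* Invoke whichever hardware-reset handler the slot registered, if any. */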
2010 static void sdhci_pci_hw_reset(struct sdhci_host *host)
2011 {
2012 struct sdhci_pci_slot *slot = sdhci_priv(host);
2013
2014 if (slot->hw_reset)
2015 slot->hw_reset(host);
2016 }
2017
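/*
 * Default host operations; sdhci_pci_probe_slot() falls back to these
 * whenever the matched sdhci_pci_fixes entry does not supply its own ->ops.
 */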
2018 static const struct sdhci_ops sdhci_pci_ops = {
2019 .set_clock = sdhci_set_clock,
2020 .enable_dma = sdhci_pci_enable_dma,
2021 .set_bus_width = sdhci_set_bus_width,
2022 .reset = sdhci_reset,
2023 .set_uhs_signaling = sdhci_set_uhs_signaling,
2024 .hw_reset = sdhci_pci_hw_reset,
2025 };
2026
2027 /*****************************************************************************\
2028 * *
2029 * Suspend/resume *
2030 * *
2031 \*****************************************************************************/
2032
2033 #ifdef CONFIG_PM_SLEEP
2034 static int sdhci_pci_suspend(struct device *dev)
2035 {
2036 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2037
2038 if (!chip)
2039 return 0;
2040
2041 if (chip->fixes && chip->fixes->suspend)
2042 return chip->fixes->suspend(chip);
2043
2044 return sdhci_pci_suspend_host(chip);
2045 }
2046
2047 static int sdhci_pci_resume(struct device *dev)
2048 {
2049 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2050
2051 if (!chip)
2052 return 0;
2053
2054 if (chip->fixes && chip->fixes->resume)
2055 return chip->fixes->resume(chip);
2056
2057 return sdhci_pci_resume_host(chip);
2058 }
2059 #endif
2060
2061 #ifdef CONFIG_PM
2062 static int sdhci_pci_runtime_suspend(struct device *dev)
2063 {
2064 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2065
2066 if (!chip)
2067 return 0;
2068
2069 if (chip->fixes && chip->fixes->runtime_suspend)
2070 return chip->fixes->runtime_suspend(chip);
2071
2072 return sdhci_pci_runtime_suspend_host(chip);
2073 }
2074
2075 static int sdhci_pci_runtime_resume(struct device *dev)
2076 {
2077 struct sdhci_pci_chip *chip = dev_get_drvdata(dev);
2078
2079 if (!chip)
2080 return 0;
2081
2082 if (chip->fixes && chip->fixes->runtime_resume)
2083 return chip->fixes->runtime_resume(chip);
2084
2085 return sdhci_pci_runtime_resume_host(chip);
2086 }
2087 #endif
2088
2089 static const struct dev_pm_ops sdhci_pci_pm_ops = {
2090 SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
2091 SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
2092 sdhci_pci_runtime_resume, NULL)
2093 };
2094
2095 /*****************************************************************************\
2096 * *
2097 * Device probing/removal *
2098 * *
2099 \*****************************************************************************/
2100
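/*
 * Set up one slot: validate the BAR and SDHCI programming interface, allocate
 * the host together with the slot (plus any fixes-specific private data), wire
 * up platform data, quirks and ops, ioremap the BAR, request the optional
 * eMMC reset and card-detect GPIOs, then register the host either through the
 * fixes' add_host() hook or plain sdhci_add_host().
 */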
2101 static struct sdhci_pci_slot *sdhci_pci_probe_slot(
2102 struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
2103 int slotno)
2104 {
2105 struct sdhci_pci_slot *slot;
2106 struct sdhci_host *host;
2107 int ret, bar = first_bar + slotno;
2108 size_t priv_size = chip->fixes ? chip->fixes->priv_size : 0;
2109
2110 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
2111 dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
2112 return ERR_PTR(-ENODEV);
2113 }
2114
2115 if (pci_resource_len(pdev, bar) < 0x100) {
2116 dev_err(&pdev->dev, "Invalid iomem size. You may "
2117 "experience problems.\n");
2118 }
2119
2120 if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
2121 dev_err(&pdev->dev, "Vendor specific interface. Aborting.\n");
2122 return ERR_PTR(-ENODEV);
2123 }
2124
2125 if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
2126 dev_err(&pdev->dev, "Unknown interface. Aborting.\n");
2127 return ERR_PTR(-ENODEV);
2128 }
2129
2130 host = sdhci_alloc_host(&pdev->dev, sizeof(*slot) + priv_size);
2131 if (IS_ERR(host)) {
2132 dev_err(&pdev->dev, "cannot allocate host\n");
2133 return ERR_CAST(host);
2134 }
2135
2136 slot = sdhci_priv(host);
2137
2138 slot->chip = chip;
2139 slot->host = host;
2140 slot->rst_n_gpio = -EINVAL;
2141 slot->cd_gpio = -EINVAL;
2142 slot->cd_idx = -1;
2143
2144 /* Retrieve platform data if there is any */
2145 if (*sdhci_pci_get_data)
2146 slot->data = sdhci_pci_get_data(pdev, slotno);
2147
2148 if (slot->data) {
2149 if (slot->data->setup) {
2150 ret = slot->data->setup(slot->data);
2151 if (ret) {
2152 dev_err(&pdev->dev, "platform setup failed\n");
2153 goto free;
2154 }
2155 }
2156 slot->rst_n_gpio = slot->data->rst_n_gpio;
2157 slot->cd_gpio = slot->data->cd_gpio;
2158 }
2159
2160 host->hw_name = "PCI";
2161 host->ops = chip->fixes && chip->fixes->ops ?
2162 chip->fixes->ops :
2163 &sdhci_pci_ops;
2164 host->quirks = chip->quirks;
2165 host->quirks2 = chip->quirks2;
2166
2167 host->irq = pdev->irq;
2168
2169 ret = pcim_iomap_regions(pdev, BIT(bar), mmc_hostname(host->mmc));
2170 if (ret) {
2171 dev_err(&pdev->dev, "cannot request region\n");
2172 goto cleanup;
2173 }
2174
2175 host->ioaddr = pcim_iomap_table(pdev)[bar];
2176
2177 if (chip->fixes && chip->fixes->probe_slot) {
2178 ret = chip->fixes->probe_slot(slot);
2179 if (ret)
2180 goto cleanup;
2181 }
2182
2183 if (gpio_is_valid(slot->rst_n_gpio)) {
2184 if (!devm_gpio_request(&pdev->dev, slot->rst_n_gpio, "eMMC_reset")) {
2185 gpio_direction_output(slot->rst_n_gpio, 1);
2186 slot->host->mmc->caps |= MMC_CAP_HW_RESET;
2187 slot->hw_reset = sdhci_pci_gpio_hw_reset;
2188 } else {
2189 dev_warn(&pdev->dev, "failed to request rst_n_gpio\n");
2190 slot->rst_n_gpio = -EINVAL;
2191 }
2192 }
2193
2194 host->mmc->pm_caps = MMC_PM_KEEP_POWER;
2195 host->mmc->slotno = slotno;
2196 host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
2197
2198 if (device_can_wakeup(&pdev->dev))
2199 host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2200
2201 if (host->mmc->caps & MMC_CAP_CD_WAKE)
2202 device_init_wakeup(&pdev->dev, true);
2203
2204 if (slot->cd_idx >= 0) {
2205 ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
2206 slot->cd_override_level, 0);
2207 if (ret && ret != -EPROBE_DEFER)
2208 ret = mmc_gpiod_request_cd(host->mmc, NULL,
2209 slot->cd_idx,
2210 slot->cd_override_level,
2211 0);
2212 if (ret == -EPROBE_DEFER)
2213 goto remove;
2214
2215 if (ret) {
2216 dev_warn(&pdev->dev, "failed to setup card detect gpio\n");
2217 slot->cd_idx = -1;
2218 }
2219 }
2220
2221 if (chip->fixes && chip->fixes->add_host)
2222 ret = chip->fixes->add_host(slot);
2223 else
2224 ret = sdhci_add_host(host);
2225 if (ret)
2226 goto remove;
2227
2228 sdhci_pci_add_own_cd(slot);
2229
2230 /*
2231 * Check if the chip needs a separate GPIO for card detect to wake up
2232 * from runtime suspend. If it is not there, don't allow runtime PM.
2233 * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
2234 */
2235 if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
2236 !gpio_is_valid(slot->cd_gpio) && slot->cd_idx < 0)
2237 chip->allow_runtime_pm = false;
2238
2239 return slot;
2240
2241 remove:
2242 if (chip->fixes && chip->fixes->remove_slot)
2243 chip->fixes->remove_slot(slot, 0);
2244
2245 cleanup:
2246 if (slot->data && slot->data->cleanup)
2247 slot->data->cleanup(slot->data);
2248
2249 free:
2250 sdhci_free_host(host);
2251
2252 return ERR_PTR(ret);
2253 }
2254
2255 static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
2256 {
2257 int dead;
2258 u32 scratch;
2259
2260 sdhci_pci_remove_own_cd(slot);
2261
2262 dead = 0;
2263 scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
2264 if (scratch == (u32)-1)
2265 dead = 1;
2266
2267 sdhci_remove_host(slot->host, dead);
2268
2269 if (slot->chip->fixes && slot->chip->fixes->remove_slot)
2270 slot->chip->fixes->remove_slot(slot, dead);
2271
2272 if (slot->data && slot->data->cleanup)
2273 slot->data->cleanup(slot->data);
2274
2275 sdhci_free_host(slot->host);
2276 }
2277
2278 static void sdhci_pci_runtime_pm_allow(struct device *dev)
2279 {
2280 pm_suspend_ignore_children(dev, 1);
2281 pm_runtime_set_autosuspend_delay(dev, 50);
2282 pm_runtime_use_autosuspend(dev);
2283 pm_runtime_allow(dev);
2284 /* Stay active until mmc core scans for a card */
2285 pm_runtime_put_noidle(dev);
2286 }
2287
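/*
 * Counterpart to sdhci_pci_runtime_pm_allow(): block runtime suspend again
 * and re-take the usage count dropped by pm_runtime_put_noidle() above.
 */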
2288 static void sdhci_pci_runtime_pm_forbid(struct device *dev)
2289 {
2290 pm_runtime_forbid(dev);
2291 pm_runtime_get_noresume(dev);
2292 }
2293
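/*
 * Bind to a controller: read PCI_SLOT_INFO to learn the slot count and first
 * BAR, enable the device, allocate the chip structure, copy quirks from the
 * matched fixes entry, run its chip-level probe() fixup, probe every slot and
 * finally enable runtime PM when the fixes allow it.
 */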
2294 static int sdhci_pci_probe(struct pci_dev *pdev,
2295 const struct pci_device_id *ent)
2296 {
2297 struct sdhci_pci_chip *chip;
2298 struct sdhci_pci_slot *slot;
2299
2300 u8 slots, first_bar;
2301 int ret, i;
2302
2303 BUG_ON(pdev == NULL);
2304 BUG_ON(ent == NULL);
2305
2306 dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
2307 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
2308
2309 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
2310 if (ret)
2311 return ret;
2312
2313 slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
2314 dev_dbg(&pdev->dev, "found %d slot(s)\n", slots);
2315
2316 BUG_ON(slots > MAX_SLOTS);
2317
2318 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
2319 if (ret)
2320 return ret;
2321
2322 first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
2323
2324 if (first_bar > 5) {
2325 dev_err(&pdev->dev, "Invalid first BAR. Aborting.\n");
2326 return -ENODEV;
2327 }
2328
2329 ret = pcim_enable_device(pdev);
2330 if (ret)
2331 return ret;
2332
2333 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
2334 if (!chip)
2335 return -ENOMEM;
2336
2337 chip->pdev = pdev;
2338 chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
2339 if (chip->fixes) {
2340 chip->quirks = chip->fixes->quirks;
2341 chip->quirks2 = chip->fixes->quirks2;
2342 chip->allow_runtime_pm = chip->fixes->allow_runtime_pm;
2343 }
2344 chip->num_slots = slots;
2345 chip->pm_retune = true;
2346 chip->rpm_retune = true;
2347
2348 pci_set_drvdata(pdev, chip);
2349
2350 if (chip->fixes && chip->fixes->probe) {
2351 ret = chip->fixes->probe(chip);
2352 if (ret)
2353 return ret;
2354 }
2355
2356 slots = chip->num_slots; /* Quirk may have changed this */
2357
2358 for (i = 0; i < slots; i++) {
2359 slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
2360 if (IS_ERR(slot)) {
2361 for (i--; i >= 0; i--)
2362 sdhci_pci_remove_slot(chip->slots[i]);
2363 return PTR_ERR(slot);
2364 }
2365
2366 chip->slots[i] = slot;
2367 }
2368
2369 if (chip->allow_runtime_pm)
2370 sdhci_pci_runtime_pm_allow(&pdev->dev);
2371
2372 return 0;
2373 }
2374
2375 static void sdhci_pci_remove(struct pci_dev *pdev)
2376 {
2377 int i;
2378 struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
2379
2380 if (chip->allow_runtime_pm)
2381 sdhci_pci_runtime_pm_forbid(&pdev->dev);
2382
2383 for (i = 0; i < chip->num_slots; i++)
2384 sdhci_pci_remove_slot(chip->slots[i]);
2385 }
2386
2387 static struct pci_driver sdhci_driver = {
2388 .name = "sdhci-pci",
2389 .id_table = pci_ids,
2390 .probe = sdhci_pci_probe,
2391 .remove = sdhci_pci_remove,
2392 .driver = {
2393 .pm = &sdhci_pci_pm_ops
2394 },
2395 };
2396
2397 module_pci_driver(sdhci_driver);
2398
2399 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
2400 MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver");
2401 MODULE_LICENSE("GPL");
2402