1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4 *
5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6 *
7 * Thanks to the following companies for their support:
8 *
9 * - JMicron (hardware and technical support)
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/delay.h>
14 #include <linux/dmaengine.h>
15 #include <linux/ktime.h>
16 #include <linux/highmem.h>
17 #include <linux/io.h>
18 #include <linux/module.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/slab.h>
21 #include <linux/scatterlist.h>
22 #include <linux/sizes.h>
23 #include <linux/swiotlb.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/of.h>
27
28 #include <linux/leds.h>
29
30 #include <linux/mmc/mmc.h>
31 #include <linux/mmc/host.h>
32 #include <linux/mmc/card.h>
33 #include <linux/mmc/sdio.h>
34 #include <linux/mmc/slot-gpio.h>
35
36 #include "sdhci.h"
37
38 #define DRIVER_NAME "sdhci"
39
40 #define DBG(f, x...) \
41 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
42
43 #define SDHCI_DUMP(f, x...) \
44 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
45
46 #define MAX_TUNING_LOOP 40
47
48 static unsigned int debug_quirks = 0;
49 static unsigned int debug_quirks2;
50
51 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
52
53 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
54
55 void sdhci_dumpregs(struct sdhci_host *host)
56 {
57 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
58
59 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
60 sdhci_readl(host, SDHCI_DMA_ADDRESS),
61 sdhci_readw(host, SDHCI_HOST_VERSION));
62 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
63 sdhci_readw(host, SDHCI_BLOCK_SIZE),
64 sdhci_readw(host, SDHCI_BLOCK_COUNT));
65 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
66 sdhci_readl(host, SDHCI_ARGUMENT),
67 sdhci_readw(host, SDHCI_TRANSFER_MODE));
68 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
69 sdhci_readl(host, SDHCI_PRESENT_STATE),
70 sdhci_readb(host, SDHCI_HOST_CONTROL));
71 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
72 sdhci_readb(host, SDHCI_POWER_CONTROL),
73 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
74 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
75 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
76 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
77 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
78 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
79 sdhci_readl(host, SDHCI_INT_STATUS));
80 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
81 sdhci_readl(host, SDHCI_INT_ENABLE),
82 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
83 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
84 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
85 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
86 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
87 sdhci_readl(host, SDHCI_CAPABILITIES),
88 sdhci_readl(host, SDHCI_CAPABILITIES_1));
89 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
90 sdhci_readw(host, SDHCI_COMMAND),
91 sdhci_readl(host, SDHCI_MAX_CURRENT));
92 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
93 sdhci_readl(host, SDHCI_RESPONSE),
94 sdhci_readl(host, SDHCI_RESPONSE + 4));
95 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
96 sdhci_readl(host, SDHCI_RESPONSE + 8),
97 sdhci_readl(host, SDHCI_RESPONSE + 12));
98 SDHCI_DUMP("Host ctl2: 0x%08x\n",
99 sdhci_readw(host, SDHCI_HOST_CONTROL2));
100
101 if (host->flags & SDHCI_USE_ADMA) {
102 if (host->flags & SDHCI_USE_64_BIT_DMA) {
103 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
104 sdhci_readl(host, SDHCI_ADMA_ERROR),
105 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
106 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
107 } else {
108 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
109 sdhci_readl(host, SDHCI_ADMA_ERROR),
110 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
111 }
112 }
113
114 if (host->ops->dump_vendor_regs)
115 host->ops->dump_vendor_regs(host);
116
117 SDHCI_DUMP("============================================\n");
118 }
119 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
120
121 /*****************************************************************************\
122 * *
123 * Low level functions *
124 * *
125 \*****************************************************************************/
126
127 static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
128 {
129 u16 ctrl2;
130
131 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
132 if (ctrl2 & SDHCI_CTRL_V4_MODE)
133 return;
134
135 ctrl2 |= SDHCI_CTRL_V4_MODE;
136 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
137 }
138
139 /*
140 * This can be called before sdhci_add_host() by a vendor's host controller
141 * driver to enable v4 mode if supported.
142 */
143 void sdhci_enable_v4_mode(struct sdhci_host *host)
144 {
145 host->v4_mode = true;
146 sdhci_do_enable_v4_mode(host);
147 }
148 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
149
150 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
151 {
152 return cmd->data || cmd->flags & MMC_RSP_BUSY;
153 }
154
155 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
156 {
157 u32 present;
158
159 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
160 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
161 return;
162
163 if (enable) {
164 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
165 SDHCI_CARD_PRESENT;
166
167 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
168 SDHCI_INT_CARD_INSERT;
169 } else {
170 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
171 }
172
173 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
174 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
175 }
176
177 static void sdhci_enable_card_detection(struct sdhci_host *host)
178 {
179 sdhci_set_card_detection(host, true);
180 }
181
182 static void sdhci_disable_card_detection(struct sdhci_host *host)
183 {
184 sdhci_set_card_detection(host, false);
185 }
186
187 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
188 {
189 if (host->bus_on)
190 return;
191 host->bus_on = true;
192 pm_runtime_get_noresume(host->mmc->parent);
193 }
194
195 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
196 {
197 if (!host->bus_on)
198 return;
199 host->bus_on = false;
200 pm_runtime_put_noidle(host->mmc->parent);
201 }
202
203 void sdhci_reset(struct sdhci_host *host, u8 mask)
204 {
205 ktime_t timeout;
206
207 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
208
209 if (mask & SDHCI_RESET_ALL) {
210 host->clock = 0;
211 /* Reset-all turns off SD Bus Power */
212 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
213 sdhci_runtime_pm_bus_off(host);
214 }
215
216 /* Wait max 100 ms */
217 timeout = ktime_add_ms(ktime_get(), 100);
218
219 /* hw clears the bit when it's done */
220 while (1) {
221 bool timedout = ktime_after(ktime_get(), timeout);
222
223 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
224 break;
225 if (timedout) {
226 pr_err("%s: Reset 0x%x never completed.\n",
227 mmc_hostname(host->mmc), (int)mask);
228 sdhci_dumpregs(host);
229 return;
230 }
231 udelay(10);
232 }
233 }
234 EXPORT_SYMBOL_GPL(sdhci_reset);
235
236 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
237 {
238 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
239 struct mmc_host *mmc = host->mmc;
240
241 if (!mmc->ops->get_cd(mmc))
242 return;
243 }
244
245 host->ops->reset(host, mask);
246
247 if (mask & SDHCI_RESET_ALL) {
248 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
249 if (host->ops->enable_dma)
250 host->ops->enable_dma(host);
251 }
252
253 /* Resetting the controller clears many settings */
254 host->preset_enabled = false;
255 }
256 }
257
258 static void sdhci_set_default_irqs(struct sdhci_host *host)
259 {
260 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
261 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
262 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
263 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
264 SDHCI_INT_RESPONSE;
265
266 if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
267 host->tuning_mode == SDHCI_TUNING_MODE_3)
268 host->ier |= SDHCI_INT_RETUNE;
269
270 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
271 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
272 }
273
274 static void sdhci_config_dma(struct sdhci_host *host)
275 {
276 u8 ctrl;
277 u16 ctrl2;
278
279 if (host->version < SDHCI_SPEC_200)
280 return;
281
282 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
283
284 /*
285 * Always adjust the DMA selection as some controllers
286 * (e.g. JMicron) can't do PIO properly when the selection
287 * is ADMA.
288 */
289 ctrl &= ~SDHCI_CTRL_DMA_MASK;
290 if (!(host->flags & SDHCI_REQ_USE_DMA))
291 goto out;
292
293 /* Note if DMA Select is zero then SDMA is selected */
294 if (host->flags & SDHCI_USE_ADMA)
295 ctrl |= SDHCI_CTRL_ADMA32;
296
297 if (host->flags & SDHCI_USE_64_BIT_DMA) {
298 /*
299 * If v4 mode, all supported DMA can be 64-bit addressing if
300 * controller supports 64-bit system address, otherwise only
301 * ADMA can support 64-bit addressing.
302 */
303 if (host->v4_mode) {
304 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
305 ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
306 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
307 } else if (host->flags & SDHCI_USE_ADMA) {
308 /*
309 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
310 * set SDHCI_CTRL_ADMA64.
311 */
312 ctrl |= SDHCI_CTRL_ADMA64;
313 }
314 }
315
316 out:
317 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
318 }
319
320 static void sdhci_init(struct sdhci_host *host, int soft)
321 {
322 struct mmc_host *mmc = host->mmc;
323 unsigned long flags;
324
325 if (soft)
326 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
327 else
328 sdhci_do_reset(host, SDHCI_RESET_ALL);
329
330 if (host->v4_mode)
331 sdhci_do_enable_v4_mode(host);
332
333 spin_lock_irqsave(&host->lock, flags);
334 sdhci_set_default_irqs(host);
335 spin_unlock_irqrestore(&host->lock, flags);
336
337 host->cqe_on = false;
338
339 if (soft) {
340 /* force clock reconfiguration */
341 host->clock = 0;
342 host->reinit_uhs = true;
343 mmc->ops->set_ios(mmc, &mmc->ios);
344 }
345 }
346
347 static void sdhci_reinit(struct sdhci_host *host)
348 {
349 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
350
351 sdhci_init(host, 0);
352 sdhci_enable_card_detection(host);
353
354 /*
355 * A change to the card detect bits indicates a change in present state,
356 * refer sdhci_set_card_detection(). A card detect interrupt might have
357 * been missed while the host controller was being reset, so trigger a
358 * rescan to check.
359 */
360 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
361 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
362 }
363
364 static void __sdhci_led_activate(struct sdhci_host *host)
365 {
366 u8 ctrl;
367
368 if (host->quirks & SDHCI_QUIRK_NO_LED)
369 return;
370
371 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
372 ctrl |= SDHCI_CTRL_LED;
373 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
374 }
375
376 static void __sdhci_led_deactivate(struct sdhci_host *host)
377 {
378 u8 ctrl;
379
380 if (host->quirks & SDHCI_QUIRK_NO_LED)
381 return;
382
383 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
384 ctrl &= ~SDHCI_CTRL_LED;
385 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
386 }
387
388 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
389 static void sdhci_led_control(struct led_classdev *led,
390 enum led_brightness brightness)
391 {
392 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
393 unsigned long flags;
394
395 spin_lock_irqsave(&host->lock, flags);
396
397 if (host->runtime_suspended)
398 goto out;
399
400 if (brightness == LED_OFF)
401 __sdhci_led_deactivate(host);
402 else
403 __sdhci_led_activate(host);
404 out:
405 spin_unlock_irqrestore(&host->lock, flags);
406 }
407
408 static int sdhci_led_register(struct sdhci_host *host)
409 {
410 struct mmc_host *mmc = host->mmc;
411
412 if (host->quirks & SDHCI_QUIRK_NO_LED)
413 return 0;
414
415 snprintf(host->led_name, sizeof(host->led_name),
416 "%s::", mmc_hostname(mmc));
417
418 host->led.name = host->led_name;
419 host->led.brightness = LED_OFF;
420 host->led.default_trigger = mmc_hostname(mmc);
421 host->led.brightness_set = sdhci_led_control;
422
423 return led_classdev_register(mmc_dev(mmc), &host->led);
424 }
425
426 static void sdhci_led_unregister(struct sdhci_host *host)
427 {
428 if (host->quirks & SDHCI_QUIRK_NO_LED)
429 return;
430
431 led_classdev_unregister(&host->led);
432 }
433
434 static inline void sdhci_led_activate(struct sdhci_host *host)
435 {
436 }
437
438 static inline void sdhci_led_deactivate(struct sdhci_host *host)
439 {
440 }
441
442 #else
443
444 static inline int sdhci_led_register(struct sdhci_host *host)
445 {
446 return 0;
447 }
448
449 static inline void sdhci_led_unregister(struct sdhci_host *host)
450 {
451 }
452
453 static inline void sdhci_led_activate(struct sdhci_host *host)
454 {
455 __sdhci_led_activate(host);
456 }
457
458 static inline void sdhci_led_deactivate(struct sdhci_host *host)
459 {
460 __sdhci_led_deactivate(host);
461 }
462
463 #endif
464
465 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
466 unsigned long timeout)
467 {
468 if (sdhci_data_line_cmd(mrq->cmd))
469 mod_timer(&host->data_timer, timeout);
470 else
471 mod_timer(&host->timer, timeout);
472 }
473
474 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
475 {
476 if (sdhci_data_line_cmd(mrq->cmd))
477 del_timer(&host->data_timer);
478 else
479 del_timer(&host->timer);
480 }
481
482 static inline bool sdhci_has_requests(struct sdhci_host *host)
483 {
484 return host->cmd || host->data_cmd;
485 }
486
487 /*****************************************************************************\
488 * *
489 * Core functions *
490 * *
491 \*****************************************************************************/
492
493 static void sdhci_read_block_pio(struct sdhci_host *host)
494 {
495 unsigned long flags;
496 size_t blksize, len, chunk;
497 u32 scratch;
498 u8 *buf;
499
500 DBG("PIO reading\n");
501
502 blksize = host->data->blksz;
503 chunk = 0;
504
505 local_irq_save(flags);
506
507 while (blksize) {
508 BUG_ON(!sg_miter_next(&host->sg_miter));
509
510 len = min(host->sg_miter.length, blksize);
511
512 blksize -= len;
513 host->sg_miter.consumed = len;
514
515 buf = host->sg_miter.addr;
516
517 while (len) {
518 if (chunk == 0) {
519 scratch = sdhci_readl(host, SDHCI_BUFFER);
520 chunk = 4;
521 }
522
523 *buf = scratch & 0xFF;
524
525 buf++;
526 scratch >>= 8;
527 chunk--;
528 len--;
529 }
530 }
531
532 sg_miter_stop(&host->sg_miter);
533
534 local_irq_restore(flags);
535 }
536
537 static void sdhci_write_block_pio(struct sdhci_host *host)
538 {
539 unsigned long flags;
540 size_t blksize, len, chunk;
541 u32 scratch;
542 u8 *buf;
543
544 DBG("PIO writing\n");
545
546 blksize = host->data->blksz;
547 chunk = 0;
548 scratch = 0;
549
550 local_irq_save(flags);
551
552 while (blksize) {
553 BUG_ON(!sg_miter_next(&host->sg_miter));
554
555 len = min(host->sg_miter.length, blksize);
556
557 blksize -= len;
558 host->sg_miter.consumed = len;
559
560 buf = host->sg_miter.addr;
561
562 while (len) {
563 scratch |= (u32)*buf << (chunk * 8);
564
565 buf++;
566 chunk++;
567 len--;
568
569 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
570 sdhci_writel(host, scratch, SDHCI_BUFFER);
571 chunk = 0;
572 scratch = 0;
573 }
574 }
575 }
576
577 sg_miter_stop(&host->sg_miter);
578
579 local_irq_restore(flags);
580 }
581
582 static void sdhci_transfer_pio(struct sdhci_host *host)
583 {
584 u32 mask;
585
586 if (host->blocks == 0)
587 return;
588
589 if (host->data->flags & MMC_DATA_READ)
590 mask = SDHCI_DATA_AVAILABLE;
591 else
592 mask = SDHCI_SPACE_AVAILABLE;
593
594 /*
595 * Some controllers (JMicron JMB38x) mess up the buffer bits
596 * for transfers < 4 bytes. As long as it is just one block,
597 * we can ignore the bits.
598 */
599 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
600 (host->data->blocks == 1))
601 mask = ~0;
602
603 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
604 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
605 udelay(100);
606
607 if (host->data->flags & MMC_DATA_READ)
608 sdhci_read_block_pio(host);
609 else
610 sdhci_write_block_pio(host);
611
612 host->blocks--;
613 if (host->blocks == 0)
614 break;
615 }
616
617 DBG("PIO transfer complete.\n");
618 }
619
620 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
621 struct mmc_data *data, int cookie)
622 {
623 int sg_count;
624
625 /*
626 * If the data buffers are already mapped, return the previous
627 * dma_map_sg() result.
628 */
629 if (data->host_cookie == COOKIE_PRE_MAPPED)
630 return data->sg_count;
631
632 /* Bounce write requests to the bounce buffer */
633 if (host->bounce_buffer) {
634 unsigned int length = data->blksz * data->blocks;
635
636 if (length > host->bounce_buffer_size) {
637 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
638 mmc_hostname(host->mmc), length,
639 host->bounce_buffer_size);
640 return -EIO;
641 }
642 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
643 /* Copy the data to the bounce buffer */
644 if (host->ops->copy_to_bounce_buffer) {
645 host->ops->copy_to_bounce_buffer(host,
646 data, length);
647 } else {
648 sg_copy_to_buffer(data->sg, data->sg_len,
649 host->bounce_buffer, length);
650 }
651 }
652 /* Switch ownership to the DMA */
653 dma_sync_single_for_device(host->mmc->parent,
654 host->bounce_addr,
655 host->bounce_buffer_size,
656 mmc_get_dma_dir(data));
657 /* Just a dummy value */
658 sg_count = 1;
659 } else {
660 /* Just access the data directly from memory */
661 sg_count = dma_map_sg(mmc_dev(host->mmc),
662 data->sg, data->sg_len,
663 mmc_get_dma_dir(data));
664 }
665
666 if (sg_count == 0)
667 return -ENOSPC;
668
669 data->sg_count = sg_count;
670 data->host_cookie = cookie;
671
672 return sg_count;
673 }
674
675 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
676 {
677 local_irq_save(*flags);
678 return kmap_atomic(sg_page(sg)) + sg->offset;
679 }
680
681 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
682 {
683 kunmap_atomic(buffer);
684 local_irq_restore(*flags);
685 }
686
687 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
688 dma_addr_t addr, int len, unsigned int cmd)
689 {
690 struct sdhci_adma2_64_desc *dma_desc = *desc;
691
692 /* 32-bit and 64-bit descriptors have these members in same position */
693 dma_desc->cmd = cpu_to_le16(cmd);
694 dma_desc->len = cpu_to_le16(len);
695 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
696
697 if (host->flags & SDHCI_USE_64_BIT_DMA)
698 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
699
700 *desc += host->desc_sz;
701 }
702 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
703
704 static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
705 void **desc, dma_addr_t addr,
706 int len, unsigned int cmd)
707 {
708 if (host->ops->adma_write_desc)
709 host->ops->adma_write_desc(host, desc, addr, len, cmd);
710 else
711 sdhci_adma_write_desc(host, desc, addr, len, cmd);
712 }
713
714 static void sdhci_adma_mark_end(void *desc)
715 {
716 struct sdhci_adma2_64_desc *dma_desc = desc;
717
718 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
719 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
720 }
721
722 static void sdhci_adma_table_pre(struct sdhci_host *host,
723 struct mmc_data *data, int sg_count)
724 {
725 struct scatterlist *sg;
726 unsigned long flags;
727 dma_addr_t addr, align_addr;
728 void *desc, *align;
729 char *buffer;
730 int len, offset, i;
731
732 /*
733 * The spec does not specify endianness of descriptor table.
734 * We currently guess that it is LE.
735 */
736
737 host->sg_count = sg_count;
738
739 desc = host->adma_table;
740 align = host->align_buffer;
741
742 align_addr = host->align_addr;
743
744 for_each_sg(data->sg, sg, host->sg_count, i) {
745 addr = sg_dma_address(sg);
746 len = sg_dma_len(sg);
747
748 /*
749 * The SDHCI specification states that ADMA addresses must
750 * be 32-bit aligned. If they aren't, then we use a bounce
751 * buffer for the (up to three) bytes that screw up the
752 * alignment.
753 */
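/*
 * For example (illustrative numbers): a segment that starts one byte
 * past a 4-byte boundary yields offset = 3 below, so those three bytes
 * go through the align buffer and the rest of the segment then starts
 * 4-byte aligned.
 */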
754 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
755 SDHCI_ADMA2_MASK;
756 if (offset) {
757 if (data->flags & MMC_DATA_WRITE) {
758 buffer = sdhci_kmap_atomic(sg, &flags);
759 memcpy(align, buffer, offset);
760 sdhci_kunmap_atomic(buffer, &flags);
761 }
762
763 /* tran, valid */
764 __sdhci_adma_write_desc(host, &desc, align_addr,
765 offset, ADMA2_TRAN_VALID);
766
767 BUG_ON(offset > 65536);
768
769 align += SDHCI_ADMA2_ALIGN;
770 align_addr += SDHCI_ADMA2_ALIGN;
771
772 addr += offset;
773 len -= offset;
774 }
775
776 /*
777 * The block layer forces a minimum segment size of PAGE_SIZE,
778 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
779 * multiple descriptors, noting that the ADMA table is sized
780 * for 4KiB chunks anyway, so it will be big enough.
781 */
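/*
 * For example (illustrative numbers): if a mapped segment is 128 KiB
 * and the limit is 64 KiB, the loop below emits 32 KiB descriptors
 * until the remainder fits, and the final write further down covers
 * what is left.
 */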
782 while (len > host->max_adma) {
783 int n = 32 * 1024; /* 32 KiB */
784
785 __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
786 addr += n;
787 len -= n;
788 }
789
790 /* tran, valid */
791 if (len)
792 __sdhci_adma_write_desc(host, &desc, addr, len,
793 ADMA2_TRAN_VALID);
794
795 /*
796 * If this triggers then we have a calculation bug
797 * somewhere. :/
798 */
799 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
800 }
801
802 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
803 /* Mark the last descriptor as the terminating descriptor */
804 if (desc != host->adma_table) {
805 desc -= host->desc_sz;
806 sdhci_adma_mark_end(desc);
807 }
808 } else {
809 /* Add a terminating entry - nop, end, valid */
810 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
811 }
812 }
813
814 static void sdhci_adma_table_post(struct sdhci_host *host,
815 struct mmc_data *data)
816 {
817 struct scatterlist *sg;
818 int i, size;
819 void *align;
820 char *buffer;
821 unsigned long flags;
822
823 if (data->flags & MMC_DATA_READ) {
824 bool has_unaligned = false;
825
826 /* Do a quick scan of the SG list for any unaligned mappings */
827 for_each_sg(data->sg, sg, host->sg_count, i)
828 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
829 has_unaligned = true;
830 break;
831 }
832
833 if (has_unaligned) {
834 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
835 data->sg_len, DMA_FROM_DEVICE);
836
837 align = host->align_buffer;
838
839 for_each_sg(data->sg, sg, host->sg_count, i) {
840 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
841 size = SDHCI_ADMA2_ALIGN -
842 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
843
844 buffer = sdhci_kmap_atomic(sg, &flags);
845 memcpy(buffer, align, size);
846 sdhci_kunmap_atomic(buffer, &flags);
847
848 align += SDHCI_ADMA2_ALIGN;
849 }
850 }
851 }
852 }
853 }
854
855 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
856 {
857 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
858 if (host->flags & SDHCI_USE_64_BIT_DMA)
859 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
860 }
861
862 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
863 {
864 if (host->bounce_buffer)
865 return host->bounce_addr;
866 else
867 return sg_dma_address(host->data->sg);
868 }
869
870 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
871 {
872 if (host->v4_mode)
873 sdhci_set_adma_addr(host, addr);
874 else
875 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
876 }
877
878 static unsigned int sdhci_target_timeout(struct sdhci_host *host,
879 struct mmc_command *cmd,
880 struct mmc_data *data)
881 {
882 unsigned int target_timeout;
883
884 /* timeout in us */
885 if (!data) {
886 target_timeout = cmd->busy_timeout * 1000;
887 } else {
888 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
889 if (host->clock && data->timeout_clks) {
890 unsigned long long val;
891
892 /*
893 * data->timeout_clks is in units of clock cycles.
894 * host->clock is in Hz. target_timeout is in us.
895 * Hence, us = 1000000 * cycles / Hz. Round up.
896 */
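/*
 * For example (illustrative numbers): 25000 timeout clocks at a
 * 50 MHz bus clock add 1000000 * 25000 / 50000000 = 500 us to the
 * target timeout.
 */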
897 val = 1000000ULL * data->timeout_clks;
898 if (do_div(val, host->clock))
899 target_timeout++;
900 target_timeout += val;
901 }
902 }
903
904 return target_timeout;
905 }
906
907 static void sdhci_calc_sw_timeout(struct sdhci_host *host,
908 struct mmc_command *cmd)
909 {
910 struct mmc_data *data = cmd->data;
911 struct mmc_host *mmc = host->mmc;
912 struct mmc_ios *ios = &mmc->ios;
913 unsigned char bus_width = 1 << ios->bus_width;
914 unsigned int blksz;
915 unsigned int freq;
916 u64 target_timeout;
917 u64 transfer_time;
918
919 target_timeout = sdhci_target_timeout(host, cmd, data);
920 target_timeout *= NSEC_PER_USEC;
921
922 if (data) {
923 blksz = data->blksz;
924 freq = host->mmc->actual_clock ? : host->clock;
925 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
926 do_div(transfer_time, freq);
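/*
 * For example (illustrative numbers): 512-byte blocks on a 4-bit bus
 * at 50 MHz give 512 * 8 / 4 = 1024 clocks, i.e. roughly 20 us per
 * block before the 2x margin applied below.
 */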
927 /* multiply by '2' to account for any unknowns */
928 transfer_time = transfer_time * 2;
929 /* calculate timeout for the entire data */
930 host->data_timeout = data->blocks * target_timeout +
931 transfer_time;
932 } else {
933 host->data_timeout = target_timeout;
934 }
935
936 if (host->data_timeout)
937 host->data_timeout += MMC_CMD_TRANSFER_TIME;
938 }
939
940 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
941 bool *too_big)
942 {
943 u8 count;
944 struct mmc_data *data;
945 unsigned target_timeout, current_timeout;
946
947 *too_big = true;
948
949 /*
950 * If the host controller provides us with an incorrect timeout
951 * value, just skip the check and use 0xE. The hardware may take
952 * longer to time out, but that's much better than having a too-short
953 * timeout value.
954 */
955 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
956 return 0xE;
957
958 /* Unspecified command, assume max */
959 if (cmd == NULL)
960 return 0xE;
961
962 data = cmd->data;
963 /* Unspecified timeout, assume max */
964 if (!data && !cmd->busy_timeout)
965 return 0xE;
966
967 /* timeout in us */
968 target_timeout = sdhci_target_timeout(host, cmd, data);
969
970 /*
971 * Figure out needed cycles.
972 * We do this in steps in order to fit inside a 32 bit int.
973 * The first step is the minimum timeout, which will have a
974 * minimum resolution of 6 bits:
975 * (1) 2^13*1000 > 2^22,
976 * (2) host->timeout_clk < 2^16
977 * =>
978 * (1) / (2) > 2^6
979 */
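/*
 * Each increment of 'count' doubles the timeout, i.e. the controller
 * waits 2^(13 + count) timeout-clock cycles. For example, with a
 * 1 MHz timeout clock the minimum (count = 0) timeout is 8192 us, so
 * a 100 ms target needs count = 4.
 */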
980 count = 0;
981 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
982 while (current_timeout < target_timeout) {
983 count++;
984 current_timeout <<= 1;
985 if (count >= 0xF)
986 break;
987 }
988
989 if (count >= 0xF) {
990 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
991 DBG("Too large timeout 0x%x requested for CMD%d!\n",
992 count, cmd->opcode);
993 count = 0xE;
994 } else {
995 *too_big = false;
996 }
997
998 return count;
999 }
1000
1001 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
1002 {
1003 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
1004 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
1005
1006 if (host->flags & SDHCI_REQ_USE_DMA)
1007 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
1008 else
1009 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
1010
1011 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
1012 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
1013 else
1014 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
1015
1016 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1017 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1018 }
1019
1020 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
1021 {
1022 if (enable)
1023 host->ier |= SDHCI_INT_DATA_TIMEOUT;
1024 else
1025 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
1026 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1027 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1028 }
1029 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
1030
1031 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1032 {
1033 bool too_big = false;
1034 u8 count = sdhci_calc_timeout(host, cmd, &too_big);
1035
1036 if (too_big &&
1037 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1038 sdhci_calc_sw_timeout(host, cmd);
1039 sdhci_set_data_timeout_irq(host, false);
1040 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1041 sdhci_set_data_timeout_irq(host, true);
1042 }
1043
1044 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1045 }
1046 EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
1047
1048 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1049 {
1050 if (host->ops->set_timeout)
1051 host->ops->set_timeout(host, cmd);
1052 else
1053 __sdhci_set_timeout(host, cmd);
1054 }
1055
1056 static void sdhci_initialize_data(struct sdhci_host *host,
1057 struct mmc_data *data)
1058 {
1059 WARN_ON(host->data);
1060
1061 /* Sanity checks */
1062 BUG_ON(data->blksz * data->blocks > 524288);
1063 BUG_ON(data->blksz > host->mmc->max_blk_size);
1064 BUG_ON(data->blocks > 65535);
1065
1066 host->data = data;
1067 host->data_early = 0;
1068 host->data->bytes_xfered = 0;
1069 }
1070
1071 static inline void sdhci_set_block_info(struct sdhci_host *host,
1072 struct mmc_data *data)
1073 {
1074 /* Set the DMA boundary value and block size */
1075 sdhci_writew(host,
1076 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1077 SDHCI_BLOCK_SIZE);
1078 /*
1079 * For Version 4.10 onwards, if v4 mode is enabled, the 32-bit Block Count
1080 * register can be used; in that case the 16-bit block count register must be 0.
1081 */
1082 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1083 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1084 if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1085 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1086 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1087 } else {
1088 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1089 }
1090 }
1091
1092 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1093 {
1094 struct mmc_data *data = cmd->data;
1095
1096 sdhci_initialize_data(host, data);
1097
1098 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1099 struct scatterlist *sg;
1100 unsigned int length_mask, offset_mask;
1101 int i;
1102
1103 host->flags |= SDHCI_REQ_USE_DMA;
1104
1105 /*
1106 * FIXME: This doesn't account for merging when mapping the
1107 * scatterlist.
1108 *
1109 * The assumption here being that alignment and lengths are
1110 * the same after DMA mapping to device address space.
1111 */
1112 length_mask = 0;
1113 offset_mask = 0;
1114 if (host->flags & SDHCI_USE_ADMA) {
1115 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1116 length_mask = 3;
1117 /*
1118 * As we use up to 3 byte chunks to work
1119 * around alignment problems, we need to
1120 * check the offset as well.
1121 */
1122 offset_mask = 3;
1123 }
1124 } else {
1125 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1126 length_mask = 3;
1127 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1128 offset_mask = 3;
1129 }
1130
1131 if (unlikely(length_mask | offset_mask)) {
1132 for_each_sg(data->sg, sg, data->sg_len, i) {
1133 if (sg->length & length_mask) {
1134 DBG("Reverting to PIO because of transfer size (%d)\n",
1135 sg->length);
1136 host->flags &= ~SDHCI_REQ_USE_DMA;
1137 break;
1138 }
1139 if (sg->offset & offset_mask) {
1140 DBG("Reverting to PIO because of bad alignment\n");
1141 host->flags &= ~SDHCI_REQ_USE_DMA;
1142 break;
1143 }
1144 }
1145 }
1146 }
1147
1148 if (host->flags & SDHCI_REQ_USE_DMA) {
1149 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1150
1151 if (sg_cnt <= 0) {
1152 /*
1153 * This only happens when someone fed
1154 * us an invalid request.
1155 */
1156 WARN_ON(1);
1157 host->flags &= ~SDHCI_REQ_USE_DMA;
1158 } else if (host->flags & SDHCI_USE_ADMA) {
1159 sdhci_adma_table_pre(host, data, sg_cnt);
1160 sdhci_set_adma_addr(host, host->adma_addr);
1161 } else {
1162 WARN_ON(sg_cnt != 1);
1163 sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1164 }
1165 }
1166
1167 sdhci_config_dma(host);
1168
1169 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1170 int flags;
1171
1172 flags = SG_MITER_ATOMIC;
1173 if (host->data->flags & MMC_DATA_READ)
1174 flags |= SG_MITER_TO_SG;
1175 else
1176 flags |= SG_MITER_FROM_SG;
1177 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1178 host->blocks = data->blocks;
1179 }
1180
1181 sdhci_set_transfer_irqs(host);
1182
1183 sdhci_set_block_info(host, data);
1184 }
1185
1186 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
1187
1188 static int sdhci_external_dma_init(struct sdhci_host *host)
1189 {
1190 int ret = 0;
1191 struct mmc_host *mmc = host->mmc;
1192
1193 host->tx_chan = dma_request_chan(mmc->parent, "tx");
1194 if (IS_ERR(host->tx_chan)) {
1195 ret = PTR_ERR(host->tx_chan);
1196 if (ret != -EPROBE_DEFER)
1197 pr_warn("Failed to request TX DMA channel.\n");
1198 host->tx_chan = NULL;
1199 return ret;
1200 }
1201
1202 host->rx_chan = dma_request_chan(mmc->parent, "rx");
1203 if (IS_ERR(host->rx_chan)) {
1204 if (host->tx_chan) {
1205 dma_release_channel(host->tx_chan);
1206 host->tx_chan = NULL;
1207 }
1208
1209 ret = PTR_ERR(host->rx_chan);
1210 if (ret != -EPROBE_DEFER)
1211 pr_warn("Failed to request RX DMA channel.\n");
1212 host->rx_chan = NULL;
1213 }
1214
1215 return ret;
1216 }
1217
1218 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1219 struct mmc_data *data)
1220 {
1221 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
1222 }
1223
1224 static int sdhci_external_dma_setup(struct sdhci_host *host,
1225 struct mmc_command *cmd)
1226 {
1227 int ret, i;
1228 enum dma_transfer_direction dir;
1229 struct dma_async_tx_descriptor *desc;
1230 struct mmc_data *data = cmd->data;
1231 struct dma_chan *chan;
1232 struct dma_slave_config cfg;
1233 dma_cookie_t cookie;
1234 int sg_cnt;
1235
1236 if (!host->mapbase)
1237 return -EINVAL;
1238
1239 memset(&cfg, 0, sizeof(cfg));
1240 cfg.src_addr = host->mapbase + SDHCI_BUFFER;
1241 cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
1242 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1243 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1244 cfg.src_maxburst = data->blksz / 4;
1245 cfg.dst_maxburst = data->blksz / 4;
1246
1247 /* Sanity check: all the SG entries must be aligned by block size. */
1248 for (i = 0; i < data->sg_len; i++) {
1249 if ((data->sg + i)->length % data->blksz)
1250 return -EINVAL;
1251 }
1252
1253 chan = sdhci_external_dma_channel(host, data);
1254
1255 ret = dmaengine_slave_config(chan, &cfg);
1256 if (ret)
1257 return ret;
1258
1259 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1260 if (sg_cnt <= 0)
1261 return -EINVAL;
1262
1263 dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
1264 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
1265 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1266 if (!desc)
1267 return -EINVAL;
1268
1269 desc->callback = NULL;
1270 desc->callback_param = NULL;
1271
1272 cookie = dmaengine_submit(desc);
1273 if (dma_submit_error(cookie))
1274 ret = cookie;
1275
1276 return ret;
1277 }
1278
1279 static void sdhci_external_dma_release(struct sdhci_host *host)
1280 {
1281 if (host->tx_chan) {
1282 dma_release_channel(host->tx_chan);
1283 host->tx_chan = NULL;
1284 }
1285
1286 if (host->rx_chan) {
1287 dma_release_channel(host->rx_chan);
1288 host->rx_chan = NULL;
1289 }
1290
1291 sdhci_switch_external_dma(host, false);
1292 }
1293
1294 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
1295 struct mmc_command *cmd)
1296 {
1297 struct mmc_data *data = cmd->data;
1298
1299 sdhci_initialize_data(host, data);
1300
1301 host->flags |= SDHCI_REQ_USE_DMA;
1302 sdhci_set_transfer_irqs(host);
1303
1304 sdhci_set_block_info(host, data);
1305 }
1306
1307 static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1308 struct mmc_command *cmd)
1309 {
1310 if (!sdhci_external_dma_setup(host, cmd)) {
1311 __sdhci_external_dma_prepare_data(host, cmd);
1312 } else {
1313 sdhci_external_dma_release(host);
1314 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
1315 mmc_hostname(host->mmc));
1316 sdhci_prepare_data(host, cmd);
1317 }
1318 }
1319
1320 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1321 struct mmc_command *cmd)
1322 {
1323 struct dma_chan *chan;
1324
1325 if (!cmd->data)
1326 return;
1327
1328 chan = sdhci_external_dma_channel(host, cmd->data);
1329 if (chan)
1330 dma_async_issue_pending(chan);
1331 }
1332
1333 #else
1334
1335 static inline int sdhci_external_dma_init(struct sdhci_host *host)
1336 {
1337 return -EOPNOTSUPP;
1338 }
1339
1340 static inline void sdhci_external_dma_release(struct sdhci_host *host)
1341 {
1342 }
1343
1344 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1345 struct mmc_command *cmd)
1346 {
1347 /* This should never happen */
1348 WARN_ON_ONCE(1);
1349 }
1350
1351 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1352 struct mmc_command *cmd)
1353 {
1354 }
1355
1356 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1357 struct mmc_data *data)
1358 {
1359 return NULL;
1360 }
1361
1362 #endif
1363
1364 void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
1365 {
1366 host->use_external_dma = en;
1367 }
1368 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
1369
1370 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1371 struct mmc_request *mrq)
1372 {
1373 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1374 !mrq->cap_cmd_during_tfr;
1375 }
1376
1377 static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
1378 struct mmc_request *mrq)
1379 {
1380 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1381 }
1382
1383 static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
1384 struct mmc_request *mrq)
1385 {
1386 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
1387 }
1388
1389 static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1390 struct mmc_command *cmd,
1391 u16 *mode)
1392 {
1393 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1394 (cmd->opcode != SD_IO_RW_EXTENDED);
1395 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
1396 u16 ctrl2;
1397
1398 /*
1399 * In case of Version 4.10 or later, use of 'Auto CMD Auto
1400 * Select' is recommended rather than use of 'Auto CMD12
1401 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
1402 * here because some controllers (e.g. sdhci-of-dwmshc) expect it.
1403 */
1404 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1405 (use_cmd12 || use_cmd23)) {
1406 *mode |= SDHCI_TRNS_AUTO_SEL;
1407
1408 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1409 if (use_cmd23)
1410 ctrl2 |= SDHCI_CMD23_ENABLE;
1411 else
1412 ctrl2 &= ~SDHCI_CMD23_ENABLE;
1413 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1414
1415 return;
1416 }
1417
1418 /*
1419 * If we are sending CMD23, CMD12 never gets sent
1420 * on successful completion (so no Auto-CMD12).
1421 */
1422 if (use_cmd12)
1423 *mode |= SDHCI_TRNS_AUTO_CMD12;
1424 else if (use_cmd23)
1425 *mode |= SDHCI_TRNS_AUTO_CMD23;
1426 }
1427
1428 static void sdhci_set_transfer_mode(struct sdhci_host *host,
1429 struct mmc_command *cmd)
1430 {
1431 u16 mode = 0;
1432 struct mmc_data *data = cmd->data;
1433
1434 if (data == NULL) {
1435 if (host->quirks2 &
1436 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1437 /* must not clear SDHCI_TRANSFER_MODE when tuning */
1438 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1439 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1440 } else {
1441 /* clear Auto CMD settings for no data CMDs */
1442 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1443 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1444 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1445 }
1446 return;
1447 }
1448
1449 WARN_ON(!host->data);
1450
1451 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1452 mode = SDHCI_TRNS_BLK_CNT_EN;
1453
1454 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1455 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1456 sdhci_auto_cmd_select(host, cmd, &mode);
1457 if (sdhci_auto_cmd23(host, cmd->mrq))
1458 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1459 }
1460
1461 if (data->flags & MMC_DATA_READ)
1462 mode |= SDHCI_TRNS_READ;
1463 if (host->flags & SDHCI_REQ_USE_DMA)
1464 mode |= SDHCI_TRNS_DMA;
1465
1466 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1467 }
1468
1469 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1470 {
1471 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1472 ((mrq->cmd && mrq->cmd->error) ||
1473 (mrq->sbc && mrq->sbc->error) ||
1474 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1475 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1476 }
1477
1478 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
1479 {
1480 int i;
1481
1482 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1483 if (host->mrqs_done[i] == mrq) {
1484 WARN_ON(1);
1485 return;
1486 }
1487 }
1488
1489 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1490 if (!host->mrqs_done[i]) {
1491 host->mrqs_done[i] = mrq;
1492 break;
1493 }
1494 }
1495
1496 WARN_ON(i >= SDHCI_MAX_MRQS);
1497 }
1498
1499 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1500 {
1501 if (host->cmd && host->cmd->mrq == mrq)
1502 host->cmd = NULL;
1503
1504 if (host->data_cmd && host->data_cmd->mrq == mrq)
1505 host->data_cmd = NULL;
1506
1507 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
1508 host->deferred_cmd = NULL;
1509
1510 if (host->data && host->data->mrq == mrq)
1511 host->data = NULL;
1512
1513 if (sdhci_needs_reset(host, mrq))
1514 host->pending_reset = true;
1515
1516 sdhci_set_mrq_done(host, mrq);
1517
1518 sdhci_del_timer(host, mrq);
1519
1520 if (!sdhci_has_requests(host))
1521 sdhci_led_deactivate(host);
1522 }
1523
1524 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1525 {
1526 __sdhci_finish_mrq(host, mrq);
1527
1528 queue_work(host->complete_wq, &host->complete_work);
1529 }
1530
1531 static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
1532 {
1533 struct mmc_command *data_cmd = host->data_cmd;
1534 struct mmc_data *data = host->data;
1535
1536 host->data = NULL;
1537 host->data_cmd = NULL;
1538
1539 /*
1540 * The controller needs a reset of internal state machines upon error
1541 * conditions.
1542 */
1543 if (data->error) {
1544 if (!host->cmd || host->cmd == data_cmd)
1545 sdhci_do_reset(host, SDHCI_RESET_CMD);
1546 sdhci_do_reset(host, SDHCI_RESET_DATA);
1547 }
1548
1549 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1550 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1551 sdhci_adma_table_post(host, data);
1552
1553 /*
1554 * The specification states that the block count register must
1555 * be updated, but it does not specify at what point in the
1556 * data flow. That makes the register entirely useless to read
1557 * back so we have to assume that nothing made it to the card
1558 * in the event of an error.
1559 */
1560 if (data->error)
1561 data->bytes_xfered = 0;
1562 else
1563 data->bytes_xfered = data->blksz * data->blocks;
1564
1565 /*
1566 * Need to send CMD12 if -
1567 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
1568 * b) error in multiblock transfer
1569 */
1570 if (data->stop &&
1571 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
1572 data->error)) {
1573 /*
1574 * 'cap_cmd_during_tfr' request must not use the command line
1575 * after mmc_command_done() has been called. It is upper layer's
1576 * responsibility to send the stop command if required.
1577 */
1578 if (data->mrq->cap_cmd_during_tfr) {
1579 __sdhci_finish_mrq(host, data->mrq);
1580 } else {
1581 /* Avoid triggering warning in sdhci_send_command() */
1582 host->cmd = NULL;
1583 if (!sdhci_send_command(host, data->stop)) {
1584 if (sw_data_timeout) {
1585 /*
1586 * This is anyway a sw data timeout, so
1587 * give up now.
1588 */
1589 data->stop->error = -EIO;
1590 __sdhci_finish_mrq(host, data->mrq);
1591 } else {
1592 WARN_ON(host->deferred_cmd);
1593 host->deferred_cmd = data->stop;
1594 }
1595 }
1596 }
1597 } else {
1598 __sdhci_finish_mrq(host, data->mrq);
1599 }
1600 }
1601
1602 static void sdhci_finish_data(struct sdhci_host *host)
1603 {
1604 __sdhci_finish_data(host, false);
1605 }
1606
1607 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1608 {
1609 int flags;
1610 u32 mask;
1611 unsigned long timeout;
1612
1613 WARN_ON(host->cmd);
1614
1615 /* Initially, a command has no error */
1616 cmd->error = 0;
1617
1618 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1619 cmd->opcode == MMC_STOP_TRANSMISSION)
1620 cmd->flags |= MMC_RSP_BUSY;
1621
1622 mask = SDHCI_CMD_INHIBIT;
1623 if (sdhci_data_line_cmd(cmd))
1624 mask |= SDHCI_DATA_INHIBIT;
1625
1626 /* We shouldn't wait for data inhibit for stop commands, even
1627 though they might use busy signaling */
1628 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1629 mask &= ~SDHCI_DATA_INHIBIT;
1630
1631 if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
1632 return false;
1633
1634 host->cmd = cmd;
1635 host->data_timeout = 0;
1636 if (sdhci_data_line_cmd(cmd)) {
1637 WARN_ON(host->data_cmd);
1638 host->data_cmd = cmd;
1639 sdhci_set_timeout(host, cmd);
1640 }
1641
1642 if (cmd->data) {
1643 if (host->use_external_dma)
1644 sdhci_external_dma_prepare_data(host, cmd);
1645 else
1646 sdhci_prepare_data(host, cmd);
1647 }
1648
1649 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1650
1651 sdhci_set_transfer_mode(host, cmd);
1652
1653 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1654 WARN_ONCE(1, "Unsupported response type!\n");
1655 /*
1656 * This does not happen in practice because 136-bit response
1657 * commands never have busy waiting, so rather than complicate
1658 * the error path, just remove busy waiting and continue.
1659 */
1660 cmd->flags &= ~MMC_RSP_BUSY;
1661 }
1662
1663 if (!(cmd->flags & MMC_RSP_PRESENT))
1664 flags = SDHCI_CMD_RESP_NONE;
1665 else if (cmd->flags & MMC_RSP_136)
1666 flags = SDHCI_CMD_RESP_LONG;
1667 else if (cmd->flags & MMC_RSP_BUSY)
1668 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1669 else
1670 flags = SDHCI_CMD_RESP_SHORT;
1671
1672 if (cmd->flags & MMC_RSP_CRC)
1673 flags |= SDHCI_CMD_CRC;
1674 if (cmd->flags & MMC_RSP_OPCODE)
1675 flags |= SDHCI_CMD_INDEX;
1676
1677 /* CMD19 is special in that the Data Present Select should be set */
1678 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1679 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1680 flags |= SDHCI_CMD_DATA;
1681
1682 timeout = jiffies;
1683 if (host->data_timeout)
1684 timeout += nsecs_to_jiffies(host->data_timeout);
1685 else if (!cmd->data && cmd->busy_timeout > 9000)
1686 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1687 else
1688 timeout += 10 * HZ;
1689 sdhci_mod_timer(host, cmd->mrq, timeout);
1690
1691 if (host->use_external_dma)
1692 sdhci_external_dma_pre_transfer(host, cmd);
1693
1694 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1695
1696 return true;
1697 }
1698
1699 static bool sdhci_present_error(struct sdhci_host *host,
1700 struct mmc_command *cmd, bool present)
1701 {
1702 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1703 cmd->error = -ENOMEDIUM;
1704 return true;
1705 }
1706
1707 return false;
1708 }
1709
1710 static bool sdhci_send_command_retry(struct sdhci_host *host,
1711 struct mmc_command *cmd,
1712 unsigned long flags)
1713 __releases(host->lock)
1714 __acquires(host->lock)
1715 {
1716 struct mmc_command *deferred_cmd = host->deferred_cmd;
1717 int timeout = 10; /* Approx. 10 ms */
1718 bool present;
1719
1720 while (!sdhci_send_command(host, cmd)) {
1721 if (!timeout--) {
1722 pr_err("%s: Controller never released inhibit bit(s).\n",
1723 mmc_hostname(host->mmc));
1724 sdhci_dumpregs(host);
1725 cmd->error = -EIO;
1726 return false;
1727 }
1728
1729 spin_unlock_irqrestore(&host->lock, flags);
1730
1731 usleep_range(1000, 1250);
1732
1733 present = host->mmc->ops->get_cd(host->mmc);
1734
1735 spin_lock_irqsave(&host->lock, flags);
1736
1737 /* A deferred command might disappear, handle that */
1738 if (cmd == deferred_cmd && cmd != host->deferred_cmd)
1739 return true;
1740
1741 if (sdhci_present_error(host, cmd, present))
1742 return false;
1743 }
1744
1745 if (cmd == host->deferred_cmd)
1746 host->deferred_cmd = NULL;
1747
1748 return true;
1749 }
1750
1751 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1752 {
1753 int i, reg;
1754
1755 for (i = 0; i < 4; i++) {
1756 reg = SDHCI_RESPONSE + (3 - i) * 4;
1757 cmd->resp[i] = sdhci_readl(host, reg);
1758 }
1759
1760 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1761 return;
1762
1763 /* CRC is stripped so we need to do some shifting */
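/*
 * The response registers hold R[127:8] right-aligned; shifting each
 * word up by 8 bits and pulling in the top byte of the next word
 * left-aligns the response the way the core expects, leaving the
 * stripped CRC byte as zero in resp[3].
 */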
1764 for (i = 0; i < 4; i++) {
1765 cmd->resp[i] <<= 8;
1766 if (i != 3)
1767 cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1768 }
1769 }
1770
1771 static void sdhci_finish_command(struct sdhci_host *host)
1772 {
1773 struct mmc_command *cmd = host->cmd;
1774
1775 host->cmd = NULL;
1776
1777 if (cmd->flags & MMC_RSP_PRESENT) {
1778 if (cmd->flags & MMC_RSP_136) {
1779 sdhci_read_rsp_136(host, cmd);
1780 } else {
1781 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1782 }
1783 }
1784
1785 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1786 mmc_command_done(host->mmc, cmd->mrq);
1787
1788 /*
1789 * The host can send an interrupt when the busy state has
1790 * ended, allowing us to wait without wasting CPU cycles.
1791 * The busy signal uses DAT0 so this is similar to waiting
1792 * for data to complete.
1793 *
1794 * Note: The 1.0 specification is a bit ambiguous about this
1795 * feature so there might be some problems with older
1796 * controllers.
1797 */
1798 if (cmd->flags & MMC_RSP_BUSY) {
1799 if (cmd->data) {
1800 DBG("Cannot wait for busy signal when also doing a data transfer");
1801 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1802 cmd == host->data_cmd) {
1803 /* Command complete before busy is ended */
1804 return;
1805 }
1806 }
1807
1808 /* Finished CMD23, now send actual command. */
1809 if (cmd == cmd->mrq->sbc) {
1810 if (!sdhci_send_command(host, cmd->mrq->cmd)) {
1811 WARN_ON(host->deferred_cmd);
1812 host->deferred_cmd = cmd->mrq->cmd;
1813 }
1814 } else {
1815
1816 /* Processed actual command. */
1817 if (host->data && host->data_early)
1818 sdhci_finish_data(host);
1819
1820 if (!cmd->data)
1821 __sdhci_finish_mrq(host, cmd->mrq);
1822 }
1823 }
1824
1825 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1826 {
1827 u16 preset = 0;
1828
1829 switch (host->timing) {
1830 case MMC_TIMING_MMC_HS:
1831 case MMC_TIMING_SD_HS:
1832 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
1833 break;
1834 case MMC_TIMING_UHS_SDR12:
1835 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1836 break;
1837 case MMC_TIMING_UHS_SDR25:
1838 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1839 break;
1840 case MMC_TIMING_UHS_SDR50:
1841 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1842 break;
1843 case MMC_TIMING_UHS_SDR104:
1844 case MMC_TIMING_MMC_HS200:
1845 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1846 break;
1847 case MMC_TIMING_UHS_DDR50:
1848 case MMC_TIMING_MMC_DDR52:
1849 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1850 break;
1851 case MMC_TIMING_MMC_HS400:
1852 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1853 break;
1854 default:
1855 pr_warn("%s: Invalid UHS-I mode selected\n",
1856 mmc_hostname(host->mmc));
1857 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1858 break;
1859 }
1860 return preset;
1861 }
1862
1863 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1864 unsigned int *actual_clock)
1865 {
1866 int div = 0; /* Initialized for compiler warning */
1867 int real_div = div, clk_mul = 1;
1868 u16 clk = 0;
1869 bool switch_base_clk = false;
1870
1871 if (host->version >= SDHCI_SPEC_300) {
1872 if (host->preset_enabled) {
1873 u16 pre_val;
1874
1875 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1876 pre_val = sdhci_get_preset_value(host);
1877 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
1878 if (host->clk_mul &&
1879 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
1880 clk = SDHCI_PROG_CLOCK_MODE;
1881 real_div = div + 1;
1882 clk_mul = host->clk_mul;
1883 } else {
1884 real_div = max_t(int, 1, div << 1);
1885 }
1886 goto clock_set;
1887 }
1888
1889 /*
1890 * Check if the Host Controller supports Programmable Clock
1891 * Mode.
1892 */
1893 if (host->clk_mul) {
1894 for (div = 1; div <= 1024; div++) {
1895 if ((host->max_clk * host->clk_mul / div)
1896 <= clock)
1897 break;
1898 }
1899 if ((host->max_clk * host->clk_mul / div) <= clock) {
1900 /*
1901 * Set Programmable Clock Mode in the Clock
1902 * Control register.
1903 */
1904 clk = SDHCI_PROG_CLOCK_MODE;
1905 real_div = div;
1906 clk_mul = host->clk_mul;
1907 div--;
1908 } else {
1909 /*
1910 * The divisor can be too small to reach the
1911 * requested clock rate, so use the base clock.
1912 */
1913 switch_base_clk = true;
1914 }
1915 }
1916
1917 if (!host->clk_mul || switch_base_clk) {
1918 /* Version 3.00 divisors must be a multiple of 2. */
1919 if (host->max_clk <= clock)
1920 div = 1;
1921 else {
1922 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1923 div += 2) {
1924 if ((host->max_clk / div) <= clock)
1925 break;
1926 }
1927 }
1928 real_div = div;
1929 div >>= 1;
1930 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1931 && !div && host->max_clk <= 25000000)
1932 div = 1;
1933 }
1934 } else {
1935 /* Version 2.00 divisors must be a power of 2. */
1936 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1937 if ((host->max_clk / div) <= clock)
1938 break;
1939 }
1940 real_div = div;
1941 div >>= 1;
1942 }
1943
1944 clock_set:
1945 if (real_div)
1946 *actual_clock = (host->max_clk * clk_mul) / real_div;
1947 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1948 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1949 << SDHCI_DIVIDER_HI_SHIFT;
1950
1951 return clk;
1952 }
1953 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
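/*
 * Worked example (illustrative numbers, not tied to any particular host):
 * with a 200 MHz base clock, no programmable clock multiplier and a
 * requested 50 MHz SDCLK on a v3.00 controller, the divisor loop above
 * picks div = 4 (200 MHz / 4 = 50 MHz <= requested), so real_div = 4 and
 * *actual_clock = 50 MHz. The value programmed into the register is
 * div >> 1 = 2, with its low 8 bits placed at SDHCI_DIVIDER_SHIFT and the
 * upper bits at SDHCI_DIVIDER_HI_SHIFT.
 */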
1954
1955 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1956 {
1957 ktime_t timeout;
1958
1959 clk |= SDHCI_CLOCK_INT_EN;
1960 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1961
1962 /* Wait max 150 ms */
1963 timeout = ktime_add_ms(ktime_get(), 150);
1964 while (1) {
1965 bool timedout = ktime_after(ktime_get(), timeout);
1966
1967 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1968 if (clk & SDHCI_CLOCK_INT_STABLE)
1969 break;
1970 if (timedout) {
1971 pr_err("%s: Internal clock never stabilised.\n",
1972 mmc_hostname(host->mmc));
1973 sdhci_dumpregs(host);
1974 return;
1975 }
1976 udelay(10);
1977 }
1978
1979 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
1980 clk |= SDHCI_CLOCK_PLL_EN;
1981 clk &= ~SDHCI_CLOCK_INT_STABLE;
1982 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1983
1984 /* Wait max 150 ms */
1985 timeout = ktime_add_ms(ktime_get(), 150);
1986 while (1) {
1987 bool timedout = ktime_after(ktime_get(), timeout);
1988
1989 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1990 if (clk & SDHCI_CLOCK_INT_STABLE)
1991 break;
1992 if (timedout) {
1993 pr_err("%s: PLL clock never stabilised.\n",
1994 mmc_hostname(host->mmc));
1995 sdhci_dumpregs(host);
1996 return;
1997 }
1998 udelay(10);
1999 }
2000 }
2001
2002 clk |= SDHCI_CLOCK_CARD_EN;
2003 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2004 }
2005 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
2006
2007 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
2008 {
2009 u16 clk;
2010
2011 host->mmc->actual_clock = 0;
2012
2013 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2014
2015 if (clock == 0)
2016 return;
2017
2018 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
2019 sdhci_enable_clk(host, clk);
2020 }
2021 EXPORT_SYMBOL_GPL(sdhci_set_clock);
2022
2023 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
2024 unsigned short vdd)
2025 {
2026 struct mmc_host *mmc = host->mmc;
2027
2028 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2029
2030 if (mode != MMC_POWER_OFF)
2031 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
2032 else
2033 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2034 }
2035
2036 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
2037 unsigned short vdd)
2038 {
2039 u8 pwr = 0;
2040
2041 if (mode != MMC_POWER_OFF) {
2042 switch (1 << vdd) {
2043 case MMC_VDD_165_195:
2044 /*
2045 * Without a regulator, SDHCI does not support 2.0v
2046 * so we only get here if the driver deliberately
2047 * added the 2.0v range to ocr_avail. Map it to 1.8v
2048 * for the purpose of turning on the power.
2049 */
2050 case MMC_VDD_20_21:
2051 pwr = SDHCI_POWER_180;
2052 break;
2053 case MMC_VDD_29_30:
2054 case MMC_VDD_30_31:
2055 pwr = SDHCI_POWER_300;
2056 break;
2057 case MMC_VDD_32_33:
2058 case MMC_VDD_33_34:
2059 /*
2060 * 3.4 ~ 3.6V are valid only for those platforms where it's
2061 * known that the voltage range is supported by hardware.
2062 */
2063 case MMC_VDD_34_35:
2064 case MMC_VDD_35_36:
2065 pwr = SDHCI_POWER_330;
2066 break;
2067 default:
2068 WARN(1, "%s: Invalid vdd %#x\n",
2069 mmc_hostname(host->mmc), vdd);
2070 break;
2071 }
2072 }
2073
2074 if (host->pwr == pwr)
2075 return;
2076
2077 host->pwr = pwr;
2078
2079 if (pwr == 0) {
2080 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2081 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2082 sdhci_runtime_pm_bus_off(host);
2083 } else {
2084 /*
2085 * Spec says that we should clear the power reg before setting
2086 * a new value. Some controllers don't seem to like this though.
2087 */
2088 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
2089 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2090
2091 /*
2092 * At least the Marvell CaFe chip gets confused if we set the
2093 * voltage and turn on the power at the same time, so set the
2094 * voltage first.
2095 */
2096 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
2097 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2098
2099 pwr |= SDHCI_POWER_ON;
2100
2101 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2102
2103 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2104 sdhci_runtime_pm_bus_on(host);
2105
2106 /*
2107 * Some controllers need an extra 10ms delay after applying
2108 * power before they can apply the clock
2109 */
2110 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
2111 mdelay(10);
2112 }
2113 }
2114 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
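/*
 * Note on the vdd argument (a reading of the code above, not new behaviour):
 * vdd is the bit number of the selected OCR voltage range, so "1 << vdd"
 * recovers the MMC_VDD_* mask. For example, a vdd selecting the
 * MMC_VDD_33_34 range maps to SDHCI_POWER_330 in the switch above.
 */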
2115
2116 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
2117 unsigned short vdd)
2118 {
2119 if (IS_ERR(host->mmc->supply.vmmc))
2120 sdhci_set_power_noreg(host, mode, vdd);
2121 else
2122 sdhci_set_power_reg(host, mode, vdd);
2123 }
2124 EXPORT_SYMBOL_GPL(sdhci_set_power);
2125
2126 /*
2127 * Some controllers need to configure a valid bus voltage on their power
2128 * register regardless of whether an external regulator is taking care of power
2129 * supply. This helper function takes care of it if set as the controller's
2130 * sdhci_ops.set_power callback.
2131 */
2132 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
2133 unsigned char mode,
2134 unsigned short vdd)
2135 {
2136 if (!IS_ERR(host->mmc->supply.vmmc)) {
2137 struct mmc_host *mmc = host->mmc;
2138
2139 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2140 }
2141 sdhci_set_power_noreg(host, mode, vdd);
2142 }
2143 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
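/*
 * A minimal sketch of how a platform driver might hook this helper up (the
 * "my_sdhci_ops" name is hypothetical; only standard sdhci_ops callbacks
 * are shown):
 *
 *	static const struct sdhci_ops my_sdhci_ops = {
 *		.set_clock		= sdhci_set_clock,
 *		.set_power		= sdhci_set_power_and_bus_voltage,
 *		.set_bus_width		= sdhci_set_bus_width,
 *		.reset			= sdhci_reset,
 *		.set_uhs_signaling	= sdhci_set_uhs_signaling,
 *	};
 */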
2144
2145 /*****************************************************************************\
2146 * *
2147 * MMC callbacks *
2148 * *
2149 \*****************************************************************************/
2150
2151 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2152 {
2153 struct sdhci_host *host = mmc_priv(mmc);
2154 struct mmc_command *cmd;
2155 unsigned long flags;
2156 bool present;
2157
2158 /* Firstly check card presence */
2159 present = mmc->ops->get_cd(mmc);
2160
2161 spin_lock_irqsave(&host->lock, flags);
2162
2163 sdhci_led_activate(host);
2164
2165 if (sdhci_present_error(host, mrq->cmd, present))
2166 goto out_finish;
2167
2168 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2169
2170 if (!sdhci_send_command_retry(host, cmd, flags))
2171 goto out_finish;
2172
2173 spin_unlock_irqrestore(&host->lock, flags);
2174
2175 return;
2176
2177 out_finish:
2178 sdhci_finish_mrq(host, mrq);
2179 spin_unlock_irqrestore(&host->lock, flags);
2180 }
2181 EXPORT_SYMBOL_GPL(sdhci_request);
2182
2183 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
2184 {
2185 struct sdhci_host *host = mmc_priv(mmc);
2186 struct mmc_command *cmd;
2187 unsigned long flags;
2188 int ret = 0;
2189
2190 spin_lock_irqsave(&host->lock, flags);
2191
2192 if (sdhci_present_error(host, mrq->cmd, true)) {
2193 sdhci_finish_mrq(host, mrq);
2194 goto out_finish;
2195 }
2196
2197 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2198
2199 /*
2200 * The HSQ may send a command in interrupt context without polling
2201 * the busy signaling, which means we should return BUSY if the
2202 * controller has not released the inhibit bits. That lets the HSQ
2203 * retry the request in non-atomic context, so we should not finish
2204 * this request here.
2205 */
2206 if (!sdhci_send_command(host, cmd))
2207 ret = -EBUSY;
2208 else
2209 sdhci_led_activate(host);
2210
2211 out_finish:
2212 spin_unlock_irqrestore(&host->lock, flags);
2213 return ret;
2214 }
2215 EXPORT_SYMBOL_GPL(sdhci_request_atomic);
2216
2217 void sdhci_set_bus_width(struct sdhci_host *host, int width)
2218 {
2219 u8 ctrl;
2220
2221 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2222 if (width == MMC_BUS_WIDTH_8) {
2223 ctrl &= ~SDHCI_CTRL_4BITBUS;
2224 ctrl |= SDHCI_CTRL_8BITBUS;
2225 } else {
2226 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2227 ctrl &= ~SDHCI_CTRL_8BITBUS;
2228 if (width == MMC_BUS_WIDTH_4)
2229 ctrl |= SDHCI_CTRL_4BITBUS;
2230 else
2231 ctrl &= ~SDHCI_CTRL_4BITBUS;
2232 }
2233 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2234 }
2235 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2236
2237 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2238 {
2239 u16 ctrl_2;
2240
2241 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2242 /* Select Bus Speed Mode for host */
2243 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2244 if ((timing == MMC_TIMING_MMC_HS200) ||
2245 (timing == MMC_TIMING_UHS_SDR104))
2246 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2247 else if (timing == MMC_TIMING_UHS_SDR12)
2248 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2249 else if (timing == MMC_TIMING_UHS_SDR25)
2250 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2251 else if (timing == MMC_TIMING_UHS_SDR50)
2252 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2253 else if ((timing == MMC_TIMING_UHS_DDR50) ||
2254 (timing == MMC_TIMING_MMC_DDR52))
2255 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2256 else if (timing == MMC_TIMING_MMC_HS400)
2257 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2258 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2259 }
2260 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2261
2262 static bool sdhci_timing_has_preset(unsigned char timing)
2263 {
2264 switch (timing) {
2265 case MMC_TIMING_UHS_SDR12:
2266 case MMC_TIMING_UHS_SDR25:
2267 case MMC_TIMING_UHS_SDR50:
2268 case MMC_TIMING_UHS_SDR104:
2269 case MMC_TIMING_UHS_DDR50:
2270 case MMC_TIMING_MMC_DDR52:
2271 return true;
2272 }
2273 return false;
2274 }
2275
2276 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
2277 {
2278 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2279 sdhci_timing_has_preset(timing);
2280 }
2281
2282 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
2283 {
2284 /*
2285 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
2286 * Frequency. Check if preset values need to be enabled, or the Driver
2287 * Strength needs updating. Note, clock changes are handled separately.
2288 */
2289 return !host->preset_enabled &&
2290 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
2291 }
2292
2293 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2294 {
2295 struct sdhci_host *host = mmc_priv(mmc);
2296 bool reinit_uhs = host->reinit_uhs;
2297 bool turning_on_clk = false;
2298 u8 ctrl;
2299
2300 host->reinit_uhs = false;
2301
2302 if (ios->power_mode == MMC_POWER_UNDEFINED)
2303 return;
2304
2305 if (host->flags & SDHCI_DEVICE_DEAD) {
2306 if (!IS_ERR(mmc->supply.vmmc) &&
2307 ios->power_mode == MMC_POWER_OFF)
2308 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2309 return;
2310 }
2311
2312 /*
2313 * Reset the chip on each power off.
2314 * Should clear out any weird states.
2315 */
2316 if (ios->power_mode == MMC_POWER_OFF) {
2317 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2318 sdhci_reinit(host);
2319 }
2320
2321 if (host->version >= SDHCI_SPEC_300 &&
2322 (ios->power_mode == MMC_POWER_UP) &&
2323 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2324 sdhci_enable_preset_value(host, false);
2325
2326 if (!ios->clock || ios->clock != host->clock) {
2327 turning_on_clk = ios->clock && !host->clock;
2328
2329 host->ops->set_clock(host, ios->clock);
2330 host->clock = ios->clock;
2331
2332 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2333 host->clock) {
2334 host->timeout_clk = host->mmc->actual_clock ?
2335 host->mmc->actual_clock / 1000 :
2336 host->clock / 1000;
2337 host->mmc->max_busy_timeout =
2338 host->ops->get_max_timeout_count ?
2339 host->ops->get_max_timeout_count(host) :
2340 1 << 27;
2341 host->mmc->max_busy_timeout /= host->timeout_clk;
2342 }
2343 }
2344
2345 if (host->ops->set_power)
2346 host->ops->set_power(host, ios->power_mode, ios->vdd);
2347 else
2348 sdhci_set_power(host, ios->power_mode, ios->vdd);
2349
2350 if (host->ops->platform_send_init_74_clocks)
2351 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2352
2353 host->ops->set_bus_width(host, ios->bus_width);
2354
2355 /*
2356 * Special case to avoid multiple clock changes during voltage
2357 * switching.
2358 */
2359 if (!reinit_uhs &&
2360 turning_on_clk &&
2361 host->timing == ios->timing &&
2362 host->version >= SDHCI_SPEC_300 &&
2363 !sdhci_presetable_values_change(host, ios))
2364 return;
2365
2366 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2367
2368 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2369 if (ios->timing == MMC_TIMING_SD_HS ||
2370 ios->timing == MMC_TIMING_MMC_HS ||
2371 ios->timing == MMC_TIMING_MMC_HS400 ||
2372 ios->timing == MMC_TIMING_MMC_HS200 ||
2373 ios->timing == MMC_TIMING_MMC_DDR52 ||
2374 ios->timing == MMC_TIMING_UHS_SDR50 ||
2375 ios->timing == MMC_TIMING_UHS_SDR104 ||
2376 ios->timing == MMC_TIMING_UHS_DDR50 ||
2377 ios->timing == MMC_TIMING_UHS_SDR25)
2378 ctrl |= SDHCI_CTRL_HISPD;
2379 else
2380 ctrl &= ~SDHCI_CTRL_HISPD;
2381 }
2382
2383 if (host->version >= SDHCI_SPEC_300) {
2384 u16 clk, ctrl_2;
2385
2386 if (!host->preset_enabled) {
2387 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2388 /*
2389 * We only need to set Driver Strength if the
2390 * preset value enable is not set.
2391 */
2392 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2393 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2394 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2395 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2396 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2397 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2398 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2399 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2400 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2401 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2402 else {
2403 pr_warn("%s: invalid driver type, default to driver type B\n",
2404 mmc_hostname(mmc));
2405 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2406 }
2407
2408 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2409 host->drv_type = ios->drv_type;
2410 } else {
2411 /*
2412 * According to SDHC Spec v3.00, if the Preset Value
2413 * Enable in the Host Control 2 register is set, we
2414 * need to reset SD Clock Enable before changing High
2415 * Speed Enable to avoid generating clock glitches.
2416 */
2417
2418 /* Reset SD Clock Enable */
2419 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2420 clk &= ~SDHCI_CLOCK_CARD_EN;
2421 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2422
2423 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2424
2425 /* Re-enable SD Clock */
2426 host->ops->set_clock(host, host->clock);
2427 }
2428
2429 /* Reset SD Clock Enable */
2430 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2431 clk &= ~SDHCI_CLOCK_CARD_EN;
2432 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2433
2434 host->ops->set_uhs_signaling(host, ios->timing);
2435 host->timing = ios->timing;
2436
2437 if (sdhci_preset_needed(host, ios->timing)) {
2438 u16 preset;
2439
2440 sdhci_enable_preset_value(host, true);
2441 preset = sdhci_get_preset_value(host);
2442 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2443 preset);
2444 host->drv_type = ios->drv_type;
2445 }
2446
2447 /* Re-enable SD Clock */
2448 host->ops->set_clock(host, host->clock);
2449 } else
2450 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2451
2452 /*
2453 * Some (ENE) controllers go apeshit on some ios operation,
2454 * signalling timeout and CRC errors even on CMD0. Resetting
2455 * it on each ios seems to solve the problem.
2456 */
2457 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2458 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2459 }
2460 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2461
2462 static int sdhci_get_cd(struct mmc_host *mmc)
2463 {
2464 struct sdhci_host *host = mmc_priv(mmc);
2465 int gpio_cd = mmc_gpio_get_cd(mmc);
2466
2467 if (host->flags & SDHCI_DEVICE_DEAD)
2468 return 0;
2469
2470 /* If nonremovable, assume that the card is always present. */
2471 if (!mmc_card_is_removable(host->mmc))
2472 return 1;
2473
2474 /*
2475 * Try slot gpio detect. If defined, it takes precedence
2476 * over the built-in controller functionality.
2477 */
2478 if (gpio_cd >= 0)
2479 return !!gpio_cd;
2480
2481 /* If polling, assume that the card is always present. */
2482 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2483 return 1;
2484
2485 /* Host native card detect */
2486 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2487 }
2488
2489 static int sdhci_check_ro(struct sdhci_host *host)
2490 {
2491 unsigned long flags;
2492 int is_readonly;
2493
2494 spin_lock_irqsave(&host->lock, flags);
2495
2496 if (host->flags & SDHCI_DEVICE_DEAD)
2497 is_readonly = 0;
2498 else if (host->ops->get_ro)
2499 is_readonly = host->ops->get_ro(host);
2500 else if (mmc_can_gpio_ro(host->mmc))
2501 is_readonly = mmc_gpio_get_ro(host->mmc);
2502 else
2503 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2504 & SDHCI_WRITE_PROTECT);
2505
2506 spin_unlock_irqrestore(&host->lock, flags);
2507
2508 /* This quirk needs to be replaced by a callback-function later */
2509 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2510 !is_readonly : is_readonly;
2511 }
2512
2513 #define SAMPLE_COUNT 5
2514
2515 static int sdhci_get_ro(struct mmc_host *mmc)
2516 {
2517 struct sdhci_host *host = mmc_priv(mmc);
2518 int i, ro_count;
2519
2520 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2521 return sdhci_check_ro(host);
2522
2523 ro_count = 0;
2524 for (i = 0; i < SAMPLE_COUNT; i++) {
2525 if (sdhci_check_ro(host)) {
2526 if (++ro_count > SAMPLE_COUNT / 2)
2527 return 1;
2528 }
2529 msleep(30);
2530 }
2531 return 0;
2532 }
2533
2534 static void sdhci_hw_reset(struct mmc_host *mmc)
2535 {
2536 struct sdhci_host *host = mmc_priv(mmc);
2537
2538 if (host->ops && host->ops->hw_reset)
2539 host->ops->hw_reset(host);
2540 }
2541
2542 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2543 {
2544 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2545 if (enable)
2546 host->ier |= SDHCI_INT_CARD_INT;
2547 else
2548 host->ier &= ~SDHCI_INT_CARD_INT;
2549
2550 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2551 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2552 }
2553 }
2554
2555 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2556 {
2557 struct sdhci_host *host = mmc_priv(mmc);
2558 unsigned long flags;
2559
2560 if (enable)
2561 pm_runtime_get_noresume(host->mmc->parent);
2562
2563 spin_lock_irqsave(&host->lock, flags);
2564 sdhci_enable_sdio_irq_nolock(host, enable);
2565 spin_unlock_irqrestore(&host->lock, flags);
2566
2567 if (!enable)
2568 pm_runtime_put_noidle(host->mmc->parent);
2569 }
2570 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2571
2572 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2573 {
2574 struct sdhci_host *host = mmc_priv(mmc);
2575 unsigned long flags;
2576
2577 spin_lock_irqsave(&host->lock, flags);
2578 sdhci_enable_sdio_irq_nolock(host, true);
2579 spin_unlock_irqrestore(&host->lock, flags);
2580 }
2581
2582 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2583 struct mmc_ios *ios)
2584 {
2585 struct sdhci_host *host = mmc_priv(mmc);
2586 u16 ctrl;
2587 int ret;
2588
2589 /*
2590 * Signal Voltage Switching is only applicable for Host Controllers
2591 * v3.00 and above.
2592 */
2593 if (host->version < SDHCI_SPEC_300)
2594 return 0;
2595
2596 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2597
2598 switch (ios->signal_voltage) {
2599 case MMC_SIGNAL_VOLTAGE_330:
2600 if (!(host->flags & SDHCI_SIGNALING_330))
2601 return -EINVAL;
2602 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2603 ctrl &= ~SDHCI_CTRL_VDD_180;
2604 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2605
2606 if (!IS_ERR(mmc->supply.vqmmc)) {
2607 ret = mmc_regulator_set_vqmmc(mmc, ios);
2608 if (ret < 0) {
2609 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2610 mmc_hostname(mmc));
2611 return -EIO;
2612 }
2613 }
2614 /* Wait for 5ms */
2615 usleep_range(5000, 5500);
2616
2617 /* 3.3V regulator output should be stable within 5 ms */
2618 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2619 if (!(ctrl & SDHCI_CTRL_VDD_180))
2620 return 0;
2621
2622 pr_warn("%s: 3.3V regulator output did not become stable\n",
2623 mmc_hostname(mmc));
2624
2625 return -EAGAIN;
2626 case MMC_SIGNAL_VOLTAGE_180:
2627 if (!(host->flags & SDHCI_SIGNALING_180))
2628 return -EINVAL;
2629 if (!IS_ERR(mmc->supply.vqmmc)) {
2630 ret = mmc_regulator_set_vqmmc(mmc, ios);
2631 if (ret < 0) {
2632 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2633 mmc_hostname(mmc));
2634 return -EIO;
2635 }
2636 }
2637
2638 /*
2639 * Enable 1.8V Signal Enable in the Host Control2
2640 * register
2641 */
2642 ctrl |= SDHCI_CTRL_VDD_180;
2643 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2644
2645 /* Some controllers need to do more when switching */
2646 if (host->ops->voltage_switch)
2647 host->ops->voltage_switch(host);
2648
2649 /* 1.8V regulator output should be stable within 5 ms */
2650 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2651 if (ctrl & SDHCI_CTRL_VDD_180)
2652 return 0;
2653
2654 pr_warn("%s: 1.8V regulator output did not become stable\n",
2655 mmc_hostname(mmc));
2656
2657 return -EAGAIN;
2658 case MMC_SIGNAL_VOLTAGE_120:
2659 if (!(host->flags & SDHCI_SIGNALING_120))
2660 return -EINVAL;
2661 if (!IS_ERR(mmc->supply.vqmmc)) {
2662 ret = mmc_regulator_set_vqmmc(mmc, ios);
2663 if (ret < 0) {
2664 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2665 mmc_hostname(mmc));
2666 return -EIO;
2667 }
2668 }
2669 return 0;
2670 default:
2671 /* No signal voltage switch required */
2672 return 0;
2673 }
2674 }
2675 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2676
2677 static int sdhci_card_busy(struct mmc_host *mmc)
2678 {
2679 struct sdhci_host *host = mmc_priv(mmc);
2680 u32 present_state;
2681
2682 /* Check whether DAT[0] is 0 */
2683 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2684
2685 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2686 }
2687
2688 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2689 {
2690 struct sdhci_host *host = mmc_priv(mmc);
2691 unsigned long flags;
2692
2693 spin_lock_irqsave(&host->lock, flags);
2694 host->flags |= SDHCI_HS400_TUNING;
2695 spin_unlock_irqrestore(&host->lock, flags);
2696
2697 return 0;
2698 }
2699
2700 void sdhci_start_tuning(struct sdhci_host *host)
2701 {
2702 u16 ctrl;
2703
2704 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2705 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2706 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2707 ctrl |= SDHCI_CTRL_TUNED_CLK;
2708 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2709
2710 /*
2711 * As per the Host Controller spec v3.00, tuning command
2712 * generates Buffer Read Ready interrupt, so enable that.
2713 *
2714 * Note: The spec clearly says that when tuning sequence
2715 * is being performed, the controller does not generate
2716 * interrupts other than Buffer Read Ready interrupt. But
2717 * to make sure we don't hit a controller bug, we _only_
2718 * enable Buffer Read Ready interrupt here.
2719 */
2720 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2721 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2722 }
2723 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2724
2725 void sdhci_end_tuning(struct sdhci_host *host)
2726 {
2727 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2728 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2729 }
2730 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2731
2732 void sdhci_reset_tuning(struct sdhci_host *host)
2733 {
2734 u16 ctrl;
2735
2736 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2737 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2738 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2739 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2740 }
2741 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2742
2743 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2744 {
2745 sdhci_reset_tuning(host);
2746
2747 sdhci_do_reset(host, SDHCI_RESET_CMD);
2748 sdhci_do_reset(host, SDHCI_RESET_DATA);
2749
2750 sdhci_end_tuning(host);
2751
2752 mmc_abort_tuning(host->mmc, opcode);
2753 }
2754 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2755
2756 /*
2757 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2758 * tuning command does not have a data payload (or rather the hardware does it
2759 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2760 * interrupt setup is different from other commands and there is no timeout
2761 * interrupt so special handling is needed.
2762 */
2763 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2764 {
2765 struct mmc_host *mmc = host->mmc;
2766 struct mmc_command cmd = {};
2767 struct mmc_request mrq = {};
2768 unsigned long flags;
2769 u32 b = host->sdma_boundary;
2770
2771 spin_lock_irqsave(&host->lock, flags);
2772
2773 cmd.opcode = opcode;
2774 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2775 cmd.mrq = &mrq;
2776
2777 mrq.cmd = &cmd;
2778 /*
2779 * In response to CMD19, the card sends 64 bytes of tuning
2780 * block to the Host Controller, so the block size is set to 64
2781 * here. For CMD21 (HS200) on an 8-bit bus the block is 128 bytes.
2782 */
2783 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2784 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2785 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2786 else
2787 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2788
2789 /*
2790 * The tuning block is sent by the card to the host controller.
2791 * So we set the TRNS_READ bit in the Transfer Mode register.
2792 * This also takes care of setting DMA Enable and Multi Block
2793 * Select in the same register to 0.
2794 */
2795 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2796
2797 if (!sdhci_send_command_retry(host, &cmd, flags)) {
2798 spin_unlock_irqrestore(&host->lock, flags);
2799 host->tuning_done = 0;
2800 return;
2801 }
2802
2803 host->cmd = NULL;
2804
2805 sdhci_del_timer(host, &mrq);
2806
2807 host->tuning_done = 0;
2808
2809 spin_unlock_irqrestore(&host->lock, flags);
2810
2811 /* Wait for Buffer Read Ready interrupt */
2812 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2813 msecs_to_jiffies(50));
2814
2815 }
2816 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2817
2818 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2819 {
2820 int i;
2821
2822 /*
2823 * Issue the tuning command repeatedly until Execute Tuning is
2824 * cleared or the number of loops reaches the tuning loop count.
2825 */
2826 for (i = 0; i < host->tuning_loop_count; i++) {
2827 u16 ctrl;
2828
2829 sdhci_send_tuning(host, opcode);
2830
2831 if (!host->tuning_done) {
2832 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2833 mmc_hostname(host->mmc));
2834 sdhci_abort_tuning(host, opcode);
2835 return -ETIMEDOUT;
2836 }
2837
2838 /* Spec does not require a delay between tuning cycles */
2839 if (host->tuning_delay > 0)
2840 mdelay(host->tuning_delay);
2841
2842 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2843 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2844 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2845 return 0; /* Success! */
2846 break;
2847 }
2848
2849 }
2850
2851 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2852 mmc_hostname(host->mmc));
2853 sdhci_reset_tuning(host);
2854 return -EAGAIN;
2855 }
2856
2857 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2858 {
2859 struct sdhci_host *host = mmc_priv(mmc);
2860 int err = 0;
2861 unsigned int tuning_count = 0;
2862 bool hs400_tuning;
2863
2864 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2865
2866 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2867 tuning_count = host->tuning_count;
2868
2869 /*
2870 * The Host Controller needs tuning for SDR104 and DDR50
2871 * modes, and for SDR50 mode when Use Tuning for SDR50 is set in
2872 * the Capabilities register.
2873 * If the Host Controller supports the HS200 mode then the
2874 * tuning function has to be executed.
2875 */
2876 switch (host->timing) {
2877 /* HS400 tuning is done in HS200 mode */
2878 case MMC_TIMING_MMC_HS400:
2879 err = -EINVAL;
2880 goto out;
2881
2882 case MMC_TIMING_MMC_HS200:
2883 /*
2884 * Periodic re-tuning for HS400 is not expected to be needed, so
2885 * disable it here.
2886 */
2887 if (hs400_tuning)
2888 tuning_count = 0;
2889 break;
2890
2891 case MMC_TIMING_UHS_SDR104:
2892 case MMC_TIMING_UHS_DDR50:
2893 break;
2894
2895 case MMC_TIMING_UHS_SDR50:
2896 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2897 break;
2898 fallthrough;
2899
2900 default:
2901 goto out;
2902 }
2903
2904 if (host->ops->platform_execute_tuning) {
2905 err = host->ops->platform_execute_tuning(host, opcode);
2906 goto out;
2907 }
2908
2909 host->mmc->retune_period = tuning_count;
2910
2911 if (host->tuning_delay < 0)
2912 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2913
2914 sdhci_start_tuning(host);
2915
2916 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2917
2918 sdhci_end_tuning(host);
2919 out:
2920 host->flags &= ~SDHCI_HS400_TUNING;
2921
2922 return err;
2923 }
2924 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2925
2926 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2927 {
2928 /* Host Controller v3.00 defines preset value registers */
2929 if (host->version < SDHCI_SPEC_300)
2930 return;
2931
2932 /*
2933 * Only enable or disable Preset Value if it is not already in
2934 * the requested state; otherwise, we bail out.
2935 */
2936 if (host->preset_enabled != enable) {
2937 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2938
2939 if (enable)
2940 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2941 else
2942 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2943
2944 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2945
2946 if (enable)
2947 host->flags |= SDHCI_PV_ENABLED;
2948 else
2949 host->flags &= ~SDHCI_PV_ENABLED;
2950
2951 host->preset_enabled = enable;
2952 }
2953 }
2954
2955 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2956 int err)
2957 {
2958 struct sdhci_host *host = mmc_priv(mmc);
2959 struct mmc_data *data = mrq->data;
2960
2961 if (data->host_cookie != COOKIE_UNMAPPED)
2962 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2963 mmc_get_dma_dir(data));
2964
2965 data->host_cookie = COOKIE_UNMAPPED;
2966 }
2967
2968 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2969 {
2970 struct sdhci_host *host = mmc_priv(mmc);
2971
2972 mrq->data->host_cookie = COOKIE_UNMAPPED;
2973
2974 /*
2975 * No pre-mapping in the pre hook if we're using the bounce buffer,
2976 * for that we would need two bounce buffers since one buffer is
2977 * in flight when this is getting called.
2978 */
2979 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2980 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2981 }
2982
2983 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2984 {
2985 if (host->data_cmd) {
2986 host->data_cmd->error = err;
2987 sdhci_finish_mrq(host, host->data_cmd->mrq);
2988 }
2989
2990 if (host->cmd) {
2991 host->cmd->error = err;
2992 sdhci_finish_mrq(host, host->cmd->mrq);
2993 }
2994 }
2995
2996 static void sdhci_card_event(struct mmc_host *mmc)
2997 {
2998 struct sdhci_host *host = mmc_priv(mmc);
2999 unsigned long flags;
3000 int present;
3001
3002 /* First check if client has provided their own card event */
3003 if (host->ops->card_event)
3004 host->ops->card_event(host);
3005
3006 present = mmc->ops->get_cd(mmc);
3007
3008 spin_lock_irqsave(&host->lock, flags);
3009
3010 /* Check sdhci_has_requests() first in case we are runtime suspended */
3011 if (sdhci_has_requests(host) && !present) {
3012 pr_err("%s: Card removed during transfer!\n",
3013 mmc_hostname(host->mmc));
3014 pr_err("%s: Resetting controller.\n",
3015 mmc_hostname(host->mmc));
3016
3017 sdhci_do_reset(host, SDHCI_RESET_CMD);
3018 sdhci_do_reset(host, SDHCI_RESET_DATA);
3019
3020 sdhci_error_out_mrqs(host, -ENOMEDIUM);
3021 }
3022
3023 spin_unlock_irqrestore(&host->lock, flags);
3024 }
3025
3026 static const struct mmc_host_ops sdhci_ops = {
3027 .request = sdhci_request,
3028 .post_req = sdhci_post_req,
3029 .pre_req = sdhci_pre_req,
3030 .set_ios = sdhci_set_ios,
3031 .get_cd = sdhci_get_cd,
3032 .get_ro = sdhci_get_ro,
3033 .hw_reset = sdhci_hw_reset,
3034 .enable_sdio_irq = sdhci_enable_sdio_irq,
3035 .ack_sdio_irq = sdhci_ack_sdio_irq,
3036 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
3037 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
3038 .execute_tuning = sdhci_execute_tuning,
3039 .card_event = sdhci_card_event,
3040 .card_busy = sdhci_card_busy,
3041 };
3042
3043 /*****************************************************************************\
3044 * *
3045 * Request done *
3046 * *
3047 \*****************************************************************************/
3048
3049 static bool sdhci_request_done(struct sdhci_host *host)
3050 {
3051 unsigned long flags;
3052 struct mmc_request *mrq;
3053 int i;
3054
3055 spin_lock_irqsave(&host->lock, flags);
3056
3057 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3058 mrq = host->mrqs_done[i];
3059 if (mrq)
3060 break;
3061 }
3062
3063 if (!mrq) {
3064 spin_unlock_irqrestore(&host->lock, flags);
3065 return true;
3066 }
3067
3068 /*
3069 * The controller needs a reset of internal state machines
3070 * upon error conditions.
3071 */
3072 if (sdhci_needs_reset(host, mrq)) {
3073 /*
3074 * Do not finish until command and data lines are available for
3075 * reset. Note there can only be one other mrq, so it cannot
3076 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3077 * would both be null.
3078 */
3079 if (host->cmd || host->data_cmd) {
3080 spin_unlock_irqrestore(&host->lock, flags);
3081 return true;
3082 }
3083
3084 /* Some controllers need this kick or reset won't work here */
3085 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3086 /* This is to force an update */
3087 host->ops->set_clock(host, host->clock);
3088
3089 /*
3090 * Spec says we should do both at the same time, but Ricoh
3091 * controllers do not like that.
3092 */
3093 sdhci_do_reset(host, SDHCI_RESET_CMD);
3094 sdhci_do_reset(host, SDHCI_RESET_DATA);
3095
3096 host->pending_reset = false;
3097 }
3098
3099 /*
3100 * Always unmap the data buffers if they were mapped by
3101 * sdhci_prepare_data() whenever we finish with a request.
3102 * This avoids leaking DMA mappings on error.
3103 */
3104 if (host->flags & SDHCI_REQ_USE_DMA) {
3105 struct mmc_data *data = mrq->data;
3106
3107 if (host->use_external_dma && data &&
3108 (mrq->cmd->error || data->error)) {
3109 struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3110
3111 host->mrqs_done[i] = NULL;
3112 spin_unlock_irqrestore(&host->lock, flags);
3113 dmaengine_terminate_sync(chan);
3114 spin_lock_irqsave(&host->lock, flags);
3115 sdhci_set_mrq_done(host, mrq);
3116 }
3117
3118 if (data && data->host_cookie == COOKIE_MAPPED) {
3119 if (host->bounce_buffer) {
3120 /*
3121 * On reads, copy the bounced data into the
3122 * sglist
3123 */
3124 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3125 unsigned int length = data->bytes_xfered;
3126
3127 if (length > host->bounce_buffer_size) {
3128 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3129 mmc_hostname(host->mmc),
3130 host->bounce_buffer_size,
3131 data->bytes_xfered);
3132 /* Cap it down and continue */
3133 length = host->bounce_buffer_size;
3134 }
3135 dma_sync_single_for_cpu(
3136 host->mmc->parent,
3137 host->bounce_addr,
3138 host->bounce_buffer_size,
3139 DMA_FROM_DEVICE);
3140 sg_copy_from_buffer(data->sg,
3141 data->sg_len,
3142 host->bounce_buffer,
3143 length);
3144 } else {
3145 /* No copying, just switch ownership */
3146 dma_sync_single_for_cpu(
3147 host->mmc->parent,
3148 host->bounce_addr,
3149 host->bounce_buffer_size,
3150 mmc_get_dma_dir(data));
3151 }
3152 } else {
3153 /* Unmap the raw data */
3154 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3155 data->sg_len,
3156 mmc_get_dma_dir(data));
3157 }
3158 data->host_cookie = COOKIE_UNMAPPED;
3159 }
3160 }
3161
3162 host->mrqs_done[i] = NULL;
3163
3164 spin_unlock_irqrestore(&host->lock, flags);
3165
3166 if (host->ops->request_done)
3167 host->ops->request_done(host, mrq);
3168 else
3169 mmc_request_done(host->mmc, mrq);
3170
3171 return false;
3172 }
3173
3174 static void sdhci_complete_work(struct work_struct *work)
3175 {
3176 struct sdhci_host *host = container_of(work, struct sdhci_host,
3177 complete_work);
3178
3179 while (!sdhci_request_done(host))
3180 ;
3181 }
3182
3183 static void sdhci_timeout_timer(struct timer_list *t)
3184 {
3185 struct sdhci_host *host;
3186 unsigned long flags;
3187
3188 host = from_timer(host, t, timer);
3189
3190 spin_lock_irqsave(&host->lock, flags);
3191
3192 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3193 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3194 mmc_hostname(host->mmc));
3195 sdhci_dumpregs(host);
3196
3197 host->cmd->error = -ETIMEDOUT;
3198 sdhci_finish_mrq(host, host->cmd->mrq);
3199 }
3200
3201 spin_unlock_irqrestore(&host->lock, flags);
3202 }
3203
3204 static void sdhci_timeout_data_timer(struct timer_list *t)
3205 {
3206 struct sdhci_host *host;
3207 unsigned long flags;
3208
3209 host = from_timer(host, t, data_timer);
3210
3211 spin_lock_irqsave(&host->lock, flags);
3212
3213 if (host->data || host->data_cmd ||
3214 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3215 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3216 mmc_hostname(host->mmc));
3217 sdhci_dumpregs(host);
3218
3219 if (host->data) {
3220 host->data->error = -ETIMEDOUT;
3221 __sdhci_finish_data(host, true);
3222 queue_work(host->complete_wq, &host->complete_work);
3223 } else if (host->data_cmd) {
3224 host->data_cmd->error = -ETIMEDOUT;
3225 sdhci_finish_mrq(host, host->data_cmd->mrq);
3226 } else {
3227 host->cmd->error = -ETIMEDOUT;
3228 sdhci_finish_mrq(host, host->cmd->mrq);
3229 }
3230 }
3231
3232 spin_unlock_irqrestore(&host->lock, flags);
3233 }
3234
3235 /*****************************************************************************\
3236 * *
3237 * Interrupt handling *
3238 * *
3239 \*****************************************************************************/
3240
3241 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3242 {
3243 /* Handle auto-CMD12 error */
3244 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3245 struct mmc_request *mrq = host->data_cmd->mrq;
3246 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3247 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3248 SDHCI_INT_DATA_TIMEOUT :
3249 SDHCI_INT_DATA_CRC;
3250
3251 /* Treat auto-CMD12 error the same as data error */
3252 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3253 *intmask_p |= data_err_bit;
3254 return;
3255 }
3256 }
3257
3258 if (!host->cmd) {
3259 /*
3260 * SDHCI recovers from errors by resetting the cmd and data
3261 * circuits. Until that is done, there very well might be more
3262 * interrupts, so ignore them in that case.
3263 */
3264 if (host->pending_reset)
3265 return;
3266 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3267 mmc_hostname(host->mmc), (unsigned)intmask);
3268 sdhci_dumpregs(host);
3269 return;
3270 }
3271
3272 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3273 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3274 if (intmask & SDHCI_INT_TIMEOUT)
3275 host->cmd->error = -ETIMEDOUT;
3276 else
3277 host->cmd->error = -EILSEQ;
3278
3279 /* Treat data command CRC error the same as data CRC error */
3280 if (host->cmd->data &&
3281 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3282 SDHCI_INT_CRC) {
3283 host->cmd = NULL;
3284 *intmask_p |= SDHCI_INT_DATA_CRC;
3285 return;
3286 }
3287
3288 __sdhci_finish_mrq(host, host->cmd->mrq);
3289 return;
3290 }
3291
3292 /* Handle auto-CMD23 error */
3293 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3294 struct mmc_request *mrq = host->cmd->mrq;
3295 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3296 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3297 -ETIMEDOUT :
3298 -EILSEQ;
3299
3300 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
3301 mrq->sbc->error = err;
3302 __sdhci_finish_mrq(host, mrq);
3303 return;
3304 }
3305 }
3306
3307 if (intmask & SDHCI_INT_RESPONSE)
3308 sdhci_finish_command(host);
3309 }
3310
3311 static void sdhci_adma_show_error(struct sdhci_host *host)
3312 {
3313 void *desc = host->adma_table;
3314 dma_addr_t dma = host->adma_addr;
3315
3316 sdhci_dumpregs(host);
3317
3318 while (true) {
3319 struct sdhci_adma2_64_desc *dma_desc = desc;
3320
3321 if (host->flags & SDHCI_USE_64_BIT_DMA)
3322 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3323 (unsigned long long)dma,
3324 le32_to_cpu(dma_desc->addr_hi),
3325 le32_to_cpu(dma_desc->addr_lo),
3326 le16_to_cpu(dma_desc->len),
3327 le16_to_cpu(dma_desc->cmd));
3328 else
3329 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3330 (unsigned long long)dma,
3331 le32_to_cpu(dma_desc->addr_lo),
3332 le16_to_cpu(dma_desc->len),
3333 le16_to_cpu(dma_desc->cmd));
3334
3335 desc += host->desc_sz;
3336 dma += host->desc_sz;
3337
3338 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3339 break;
3340 }
3341 }
3342
3343 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3344 {
3345 u32 command;
3346
3347 /* CMD19 generates _only_ Buffer Read Ready interrupt */
3348 if (intmask & SDHCI_INT_DATA_AVAIL) {
3349 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3350 if (command == MMC_SEND_TUNING_BLOCK ||
3351 command == MMC_SEND_TUNING_BLOCK_HS200) {
3352 host->tuning_done = 1;
3353 wake_up(&host->buf_ready_int);
3354 return;
3355 }
3356 }
3357
3358 if (!host->data) {
3359 struct mmc_command *data_cmd = host->data_cmd;
3360
3361 /*
3362 * The "data complete" interrupt is also used to
3363 * indicate that a busy state has ended. See comment
3364 * above in sdhci_cmd_irq().
3365 */
3366 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3367 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3368 host->data_cmd = NULL;
3369 data_cmd->error = -ETIMEDOUT;
3370 __sdhci_finish_mrq(host, data_cmd->mrq);
3371 return;
3372 }
3373 if (intmask & SDHCI_INT_DATA_END) {
3374 host->data_cmd = NULL;
3375 /*
3376 * Some cards handle busy-end interrupt
3377 * before the command completed, so make
3378 * sure we do things in the proper order.
3379 */
3380 if (host->cmd == data_cmd)
3381 return;
3382
3383 __sdhci_finish_mrq(host, data_cmd->mrq);
3384 return;
3385 }
3386 }
3387
3388 /*
3389 * SDHCI recovers from errors by resetting the cmd and data
3390 * circuits. Until that is done, there very well might be more
3391 * interrupts, so ignore them in that case.
3392 */
3393 if (host->pending_reset)
3394 return;
3395
3396 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3397 mmc_hostname(host->mmc), (unsigned)intmask);
3398 sdhci_dumpregs(host);
3399
3400 return;
3401 }
3402
3403 if (intmask & SDHCI_INT_DATA_TIMEOUT)
3404 host->data->error = -ETIMEDOUT;
3405 else if (intmask & SDHCI_INT_DATA_END_BIT)
3406 host->data->error = -EILSEQ;
3407 else if ((intmask & SDHCI_INT_DATA_CRC) &&
3408 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3409 != MMC_BUS_TEST_R)
3410 host->data->error = -EILSEQ;
3411 else if (intmask & SDHCI_INT_ADMA_ERROR) {
3412 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3413 intmask);
3414 sdhci_adma_show_error(host);
3415 host->data->error = -EIO;
3416 if (host->ops->adma_workaround)
3417 host->ops->adma_workaround(host, intmask);
3418 }
3419
3420 if (host->data->error)
3421 sdhci_finish_data(host);
3422 else {
3423 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3424 sdhci_transfer_pio(host);
3425
3426 /*
3427 * We currently don't do anything fancy with DMA
3428 * boundaries, but as we can't disable the feature
3429 * we need to at least restart the transfer.
3430 *
3431 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3432 * should return a valid address to continue from, but as
3433 * some controllers are faulty, don't trust them.
3434 */
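/*
 * Worked example (assuming the default 512 KiB SDMA boundary): if the
 * transfer started at DMA address 0x10000000 and the boundary interrupt
 * fires, dmanow is rounded up to the next 512 KiB boundary, 0x10080000,
 * bytes_xfered becomes 0x80000 and the SDMA address register is
 * reprogrammed so the transfer continues from that address.
 */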
3435 if (intmask & SDHCI_INT_DMA_END) {
3436 dma_addr_t dmastart, dmanow;
3437
3438 dmastart = sdhci_sdma_address(host);
3439 dmanow = dmastart + host->data->bytes_xfered;
3440 /*
3441 * Force update to the next DMA block boundary.
3442 */
3443 dmanow = (dmanow &
3444 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3445 SDHCI_DEFAULT_BOUNDARY_SIZE;
3446 host->data->bytes_xfered = dmanow - dmastart;
3447 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3448 &dmastart, host->data->bytes_xfered, &dmanow);
3449 sdhci_set_sdma_addr(host, dmanow);
3450 }
3451
3452 if (intmask & SDHCI_INT_DATA_END) {
3453 if (host->cmd == host->data_cmd) {
3454 /*
3455 * Data managed to finish before the
3456 * command completed. Make sure we do
3457 * things in the proper order.
3458 */
3459 host->data_early = 1;
3460 } else {
3461 sdhci_finish_data(host);
3462 }
3463 }
3464 }
3465 }
3466
3467 static inline bool sdhci_defer_done(struct sdhci_host *host,
3468 struct mmc_request *mrq)
3469 {
3470 struct mmc_data *data = mrq->data;
3471
3472 return host->pending_reset || host->always_defer_done ||
3473 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3474 data->host_cookie == COOKIE_MAPPED);
3475 }
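/*
 * Requests that still have DMA mappings, or that arrive while a reset is
 * pending, are deferred to the threaded handler: sdhci_request_done() then
 * performs the unmapping / bounce-buffer copy outside hard-IRQ context,
 * while everything else is completed directly from sdhci_irq() below.
 */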
3476
3477 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3478 {
3479 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3480 irqreturn_t result = IRQ_NONE;
3481 struct sdhci_host *host = dev_id;
3482 u32 intmask, mask, unexpected = 0;
3483 int max_loops = 16;
3484 int i;
3485
3486 spin_lock(&host->lock);
3487
3488 if (host->runtime_suspended) {
3489 spin_unlock(&host->lock);
3490 return IRQ_NONE;
3491 }
3492
3493 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3494 if (!intmask || intmask == 0xffffffff) {
3495 result = IRQ_NONE;
3496 goto out;
3497 }
3498
3499 do {
3500 DBG("IRQ status 0x%08x\n", intmask);
3501
3502 if (host->ops->irq) {
3503 intmask = host->ops->irq(host, intmask);
3504 if (!intmask)
3505 goto cont;
3506 }
3507
3508 /* Clear selected interrupts. */
3509 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3510 SDHCI_INT_BUS_POWER);
3511 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3512
3513 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3514 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3515 SDHCI_CARD_PRESENT;
3516
3517 /*
3518 * There is an observation on i.MX eSDHC: the INSERT
3519 * bit will be immediately set again when it gets
3520 * cleared, if a card is inserted. We have to mask
3521 * the irq to prevent an interrupt storm which would
3522 * freeze the system. The REMOVE bit gets into the
3523 * same situation.
3524 *
3525 * More testing is needed here to ensure it works
3526 * for other platforms though.
3527 */
3528 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3529 SDHCI_INT_CARD_REMOVE);
3530 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3531 SDHCI_INT_CARD_INSERT;
3532 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3533 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3534
3535 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3536 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3537
3538 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3539 SDHCI_INT_CARD_REMOVE);
3540 result = IRQ_WAKE_THREAD;
3541 }
3542
3543 if (intmask & SDHCI_INT_CMD_MASK)
3544 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3545
3546 if (intmask & SDHCI_INT_DATA_MASK)
3547 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3548
3549 if (intmask & SDHCI_INT_BUS_POWER)
3550 pr_err("%s: Card is consuming too much power!\n",
3551 mmc_hostname(host->mmc));
3552
3553 if (intmask & SDHCI_INT_RETUNE)
3554 mmc_retune_needed(host->mmc);
3555
3556 if ((intmask & SDHCI_INT_CARD_INT) &&
3557 (host->ier & SDHCI_INT_CARD_INT)) {
3558 sdhci_enable_sdio_irq_nolock(host, false);
3559 sdio_signal_irq(host->mmc);
3560 }
3561
3562 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3563 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3564 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3565 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3566
3567 if (intmask) {
3568 unexpected |= intmask;
3569 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3570 }
3571 cont:
3572 if (result == IRQ_NONE)
3573 result = IRQ_HANDLED;
3574
3575 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3576 } while (intmask && --max_loops);
3577
3578 /* Determine if mrqs can be completed immediately */
3579 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3580 struct mmc_request *mrq = host->mrqs_done[i];
3581
3582 if (!mrq)
3583 continue;
3584
3585 if (sdhci_defer_done(host, mrq)) {
3586 result = IRQ_WAKE_THREAD;
3587 } else {
3588 mrqs_done[i] = mrq;
3589 host->mrqs_done[i] = NULL;
3590 }
3591 }
3592 out:
3593 if (host->deferred_cmd)
3594 result = IRQ_WAKE_THREAD;
3595
3596 spin_unlock(&host->lock);
3597
3598 /* Process mrqs ready for immediate completion */
3599 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3600 if (!mrqs_done[i])
3601 continue;
3602
3603 if (host->ops->request_done)
3604 host->ops->request_done(host, mrqs_done[i]);
3605 else
3606 mmc_request_done(host->mmc, mrqs_done[i]);
3607 }
3608
3609 if (unexpected) {
3610 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3611 mmc_hostname(host->mmc), unexpected);
3612 sdhci_dumpregs(host);
3613 }
3614
3615 return result;
3616 }
3617
3618 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3619 {
3620 struct sdhci_host *host = dev_id;
3621 struct mmc_command *cmd;
3622 unsigned long flags;
3623 u32 isr;
3624
3625 while (!sdhci_request_done(host))
3626 ;
3627
3628 spin_lock_irqsave(&host->lock, flags);
3629
3630 isr = host->thread_isr;
3631 host->thread_isr = 0;
3632
3633 cmd = host->deferred_cmd;
3634 if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3635 sdhci_finish_mrq(host, cmd->mrq);
3636
3637 spin_unlock_irqrestore(&host->lock, flags);
3638
3639 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3640 struct mmc_host *mmc = host->mmc;
3641
3642 mmc->ops->card_event(mmc);
3643 mmc_detect_change(mmc, msecs_to_jiffies(200));
3644 }
3645
3646 return IRQ_HANDLED;
3647 }
3648
3649 /*****************************************************************************\
3650 * *
3651 * Suspend/resume *
3652 * *
3653 \*****************************************************************************/
3654
3655 #ifdef CONFIG_PM
3656
3657 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3658 {
3659 return mmc_card_is_removable(host->mmc) &&
3660 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3661 !mmc_can_gpio_cd(host->mmc);
3662 }
3663
3664 /*
3665 * To enable wakeup events, the corresponding events have to be enabled in
3666 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3667 * Table' in the SD Host Controller Standard Specification.
3668 * It is useless to restore SDHCI_INT_ENABLE state in
3669 * sdhci_disable_irq_wakeups() since it will be set by
3670 * sdhci_enable_card_detection() or sdhci_init().
3671 */
3672 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3673 {
3674 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3675 SDHCI_WAKE_ON_INT;
3676 u32 irq_val = 0;
3677 u8 wake_val = 0;
3678 u8 val;
3679
3680 if (sdhci_cd_irq_can_wakeup(host)) {
3681 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3682 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3683 }
3684
3685 if (mmc_card_wake_sdio_irq(host->mmc)) {
3686 wake_val |= SDHCI_WAKE_ON_INT;
3687 irq_val |= SDHCI_INT_CARD_INT;
3688 }
3689
3690 if (!irq_val)
3691 return false;
3692
3693 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3694 val &= ~mask;
3695 val |= wake_val;
3696 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3697
3698 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3699
3700 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3701
3702 return host->irq_wake_enabled;
3703 }
3704
3705 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3706 {
3707 u8 val;
3708 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3709 | SDHCI_WAKE_ON_INT;
3710
3711 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3712 val &= ~mask;
3713 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3714
3715 disable_irq_wake(host->irq);
3716
3717 host->irq_wake_enabled = false;
3718 }
3719
3720 int sdhci_suspend_host(struct sdhci_host *host)
3721 {
3722 sdhci_disable_card_detection(host);
3723
3724 mmc_retune_timer_stop(host->mmc);
3725
3726 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3727 !sdhci_enable_irq_wakeups(host)) {
3728 host->ier = 0;
3729 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3730 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3731 free_irq(host->irq, host);
3732 }
3733
3734 return 0;
3735 }
3736
3737 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3738
3739 int sdhci_resume_host(struct sdhci_host *host)
3740 {
3741 struct mmc_host *mmc = host->mmc;
3742 int ret = 0;
3743
3744 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3745 if (host->ops->enable_dma)
3746 host->ops->enable_dma(host);
3747 }
3748
3749 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3750 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3751 /* Card keeps power but host controller does not */
3752 sdhci_init(host, 0);
3753 host->pwr = 0;
3754 host->clock = 0;
3755 host->reinit_uhs = true;
3756 mmc->ops->set_ios(mmc, &mmc->ios);
3757 } else {
3758 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3759 }
3760
3761 if (host->irq_wake_enabled) {
3762 sdhci_disable_irq_wakeups(host);
3763 } else {
3764 ret = request_threaded_irq(host->irq, sdhci_irq,
3765 sdhci_thread_irq, IRQF_SHARED,
3766 mmc_hostname(host->mmc), host);
3767 if (ret)
3768 return ret;
3769 }
3770
3771 sdhci_enable_card_detection(host);
3772
3773 return ret;
3774 }
3775
3776 EXPORT_SYMBOL_GPL(sdhci_resume_host);
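/*
 * Illustrative sketch (not part of this driver): platform glue drivers
 * typically wire sdhci_suspend_host()/sdhci_resume_host() into their
 * dev_pm_ops. The foo_sdhci_* names below are hypothetical, and real glue
 * code usually also gates/ungates its controller clocks around the calls.
 *
 *	static int foo_sdhci_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 *	static int foo_sdhci_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_resume_host(host);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_sdhci_pm_ops,
 *				 foo_sdhci_suspend, foo_sdhci_resume);
 */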
3777
3778 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3779 {
3780 unsigned long flags;
3781
3782 mmc_retune_timer_stop(host->mmc);
3783
3784 spin_lock_irqsave(&host->lock, flags);
3785 host->ier &= SDHCI_INT_CARD_INT;
3786 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3787 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3788 spin_unlock_irqrestore(&host->lock, flags);
3789
3790 synchronize_hardirq(host->irq);
3791
3792 spin_lock_irqsave(&host->lock, flags);
3793 host->runtime_suspended = true;
3794 spin_unlock_irqrestore(&host->lock, flags);
3795
3796 return 0;
3797 }
3798 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3799
3800 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3801 {
3802 struct mmc_host *mmc = host->mmc;
3803 unsigned long flags;
3804 int host_flags = host->flags;
3805
3806 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3807 if (host->ops->enable_dma)
3808 host->ops->enable_dma(host);
3809 }
3810
3811 sdhci_init(host, soft_reset);
3812
3813 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3814 mmc->ios.power_mode != MMC_POWER_OFF) {
3815 /* Force clock and power re-program */
3816 host->pwr = 0;
3817 host->clock = 0;
3818 host->reinit_uhs = true;
3819 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3820 mmc->ops->set_ios(mmc, &mmc->ios);
3821
3822 if ((host_flags & SDHCI_PV_ENABLED) &&
3823 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3824 spin_lock_irqsave(&host->lock, flags);
3825 sdhci_enable_preset_value(host, true);
3826 spin_unlock_irqrestore(&host->lock, flags);
3827 }
3828
3829 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3830 mmc->ops->hs400_enhanced_strobe)
3831 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3832 }
3833
3834 spin_lock_irqsave(&host->lock, flags);
3835
3836 host->runtime_suspended = false;
3837
3838 /* Enable SDIO IRQ */
3839 if (sdio_irq_claimed(mmc))
3840 sdhci_enable_sdio_irq_nolock(host, true);
3841
3842 /* Enable Card Detection */
3843 sdhci_enable_card_detection(host);
3844
3845 spin_unlock_irqrestore(&host->lock, flags);
3846
3847 return 0;
3848 }
3849 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
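/*
 * Illustrative sketch (not part of this driver): runtime PM callbacks in a
 * hypothetical foo_sdhci glue driver. Glue code would normally also gate
 * and ungate its controller clocks around these calls. Passing 0 as
 * soft_reset makes sdhci_init() perform a full reset on resume, while a
 * non-zero value requests only a CMD/DATA soft reset.
 *
 *	static int foo_sdhci_runtime_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_runtime_suspend_host(host);
 *	}
 *
 *	static int foo_sdhci_runtime_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_runtime_resume_host(host, 0);
 *	}
 */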
3850
3851 #endif /* CONFIG_PM */
3852
3853 /*****************************************************************************\
3854 * *
3855 * Command Queue Engine (CQE) helpers *
3856 * *
3857 \*****************************************************************************/
3858
3859 void sdhci_cqe_enable(struct mmc_host *mmc)
3860 {
3861 struct sdhci_host *host = mmc_priv(mmc);
3862 unsigned long flags;
3863 u8 ctrl;
3864
3865 spin_lock_irqsave(&host->lock, flags);
3866
3867 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3868 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3869 /*
3870 * Hosts from v4.10 onwards support the ADMA3 DMA type.
3871 * ADMA3 uses integrated descriptors that fetch both the command and
3872 * transfer descriptors, which is better suited to command queuing.
3873 */
3874 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3875 ctrl |= SDHCI_CTRL_ADMA3;
3876 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3877 ctrl |= SDHCI_CTRL_ADMA64;
3878 else
3879 ctrl |= SDHCI_CTRL_ADMA32;
3880 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3881
3882 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3883 SDHCI_BLOCK_SIZE);
3884
3885 /* Set maximum timeout */
3886 sdhci_set_timeout(host, NULL);
3887
3888 host->ier = host->cqe_ier;
3889
3890 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3891 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3892
3893 host->cqe_on = true;
3894
3895 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3896 mmc_hostname(mmc), host->ier,
3897 sdhci_readl(host, SDHCI_INT_STATUS));
3898
3899 spin_unlock_irqrestore(&host->lock, flags);
3900 }
3901 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3902
3903 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3904 {
3905 struct sdhci_host *host = mmc_priv(mmc);
3906 unsigned long flags;
3907
3908 spin_lock_irqsave(&host->lock, flags);
3909
3910 sdhci_set_default_irqs(host);
3911
3912 host->cqe_on = false;
3913
3914 if (recovery) {
3915 sdhci_do_reset(host, SDHCI_RESET_CMD);
3916 sdhci_do_reset(host, SDHCI_RESET_DATA);
3917 }
3918
3919 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3920 mmc_hostname(mmc), host->ier,
3921 sdhci_readl(host, SDHCI_INT_STATUS));
3922
3923 spin_unlock_irqrestore(&host->lock, flags);
3924 }
3925 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3926
3927 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3928 int *data_error)
3929 {
3930 u32 mask;
3931
3932 if (!host->cqe_on)
3933 return false;
3934
3935 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3936 *cmd_error = -EILSEQ;
3937 else if (intmask & SDHCI_INT_TIMEOUT)
3938 *cmd_error = -ETIMEDOUT;
3939 else
3940 *cmd_error = 0;
3941
3942 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3943 *data_error = -EILSEQ;
3944 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3945 *data_error = -ETIMEDOUT;
3946 else if (intmask & SDHCI_INT_ADMA_ERROR)
3947 *data_error = -EIO;
3948 else
3949 *data_error = 0;
3950
3951 /* Clear selected interrupts. */
3952 mask = intmask & host->cqe_ier;
3953 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3954
3955 if (intmask & SDHCI_INT_BUS_POWER)
3956 pr_err("%s: Card is consuming too much power!\n",
3957 mmc_hostname(host->mmc));
3958
3959 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3960 if (intmask) {
3961 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3962 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3963 mmc_hostname(host->mmc), intmask);
3964 sdhci_dumpregs(host);
3965 }
3966
3967 return true;
3968 }
3969 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
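/*
 * Illustrative sketch (not part of this driver): vendor glue drivers that
 * use the separate cqhci driver typically point their SDHCI ->irq hook at
 * a small forwarder like the one below, and wire sdhci_cqe_enable() /
 * sdhci_cqe_disable() into their struct cqhci_host_ops. The foo_* name is
 * hypothetical; cqhci_irq() comes from the cqhci driver.
 *
 *	static u32 foo_sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
 *	{
 *		int cmd_error = 0;
 *		int data_error = 0;
 *
 *		if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
 *			return intmask;
 *
 *		cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *
 *		return 0;
 *	}
 */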
3970
3971 /*****************************************************************************\
3972 * *
3973 * Device allocation/registration *
3974 * *
3975 \*****************************************************************************/
3976
3977 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3978 size_t priv_size)
3979 {
3980 struct mmc_host *mmc;
3981 struct sdhci_host *host;
3982
3983 WARN_ON(dev == NULL);
3984
3985 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3986 if (!mmc)
3987 return ERR_PTR(-ENOMEM);
3988
3989 host = mmc_priv(mmc);
3990 host->mmc = mmc;
3991 host->mmc_host_ops = sdhci_ops;
3992 mmc->ops = &host->mmc_host_ops;
3993
3994 host->flags = SDHCI_SIGNALING_330;
3995
3996 host->cqe_ier = SDHCI_CQE_INT_MASK;
3997 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3998
3999 host->tuning_delay = -1;
4000 host->tuning_loop_count = MAX_TUNING_LOOP;
4001
4002 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
4003
4004 /*
4005 * The DMA table descriptor count is calculated as the maximum
4006 * number of segments times 2, to allow for an alignment
4007 * descriptor for each segment, plus 1 for a nop end descriptor.
4008 */
4009 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
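	/*
	 * For example (illustrative): with SDHCI_MAX_SEGS defined as 128,
	 * this works out to 128 * 2 + 1 = 257 descriptors.
	 */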
4010 host->max_adma = 65536;
4011
4012 return host;
4013 }
4014
4015 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
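/*
 * Illustrative sketch (not part of this driver): the priv_size argument
 * reserves space for glue-driver private data, which can then be reached
 * via sdhci_priv(). The foo_* names below are hypothetical.
 *
 *	struct foo_sdhci_priv {
 *		struct clk *clk;
 *	};
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct foo_sdhci_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	priv = sdhci_priv(host);
 */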
4016
4017 static int sdhci_set_dma_mask(struct sdhci_host *host)
4018 {
4019 struct mmc_host *mmc = host->mmc;
4020 struct device *dev = mmc_dev(mmc);
4021 int ret = -EINVAL;
4022
4023 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
4024 host->flags &= ~SDHCI_USE_64_BIT_DMA;
4025
4026 /* Try 64-bit mask if hardware is capable of it */
4027 if (host->flags & SDHCI_USE_64_BIT_DMA) {
4028 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4029 if (ret) {
4030 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
4031 mmc_hostname(mmc));
4032 host->flags &= ~SDHCI_USE_64_BIT_DMA;
4033 }
4034 }
4035
4036 /* 32-bit mask as default & fallback */
4037 if (ret) {
4038 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4039 if (ret)
4040 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
4041 mmc_hostname(mmc));
4042 }
4043
4044 return ret;
4045 }
4046
4047 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
4048 const u32 *caps, const u32 *caps1)
4049 {
4050 u16 v;
4051 u64 dt_caps_mask = 0;
4052 u64 dt_caps = 0;
4053
4054 if (host->read_caps)
4055 return;
4056
4057 host->read_caps = true;
4058
4059 if (debug_quirks)
4060 host->quirks = debug_quirks;
4061
4062 if (debug_quirks2)
4063 host->quirks2 = debug_quirks2;
4064
4065 sdhci_do_reset(host, SDHCI_RESET_ALL);
4066
4067 if (host->v4_mode)
4068 sdhci_do_enable_v4_mode(host);
4069
4070 device_property_read_u64_array(mmc_dev(host->mmc),
4071 "sdhci-caps-mask", &dt_caps_mask, 1);
4072 device_property_read_u64_array(mmc_dev(host->mmc),
4073 "sdhci-caps", &dt_caps, 1);
4074
4075 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
4076 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
4077
4078 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
4079 return;
4080
4081 if (caps) {
4082 host->caps = *caps;
4083 } else {
4084 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
4085 host->caps &= ~lower_32_bits(dt_caps_mask);
4086 host->caps |= lower_32_bits(dt_caps);
4087 }
4088
4089 if (host->version < SDHCI_SPEC_300)
4090 return;
4091
4092 if (caps1) {
4093 host->caps1 = *caps1;
4094 } else {
4095 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
4096 host->caps1 &= ~upper_32_bits(dt_caps_mask);
4097 host->caps1 |= upper_32_bits(dt_caps);
4098 }
4099 }
4100 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
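/*
 * Illustrative sketch (not part of this driver): glue drivers whose
 * capability registers are unreliable can feed fixed values in here before
 * sdhci_setup_host() parses them; passing NULL for a parameter keeps the
 * value read from the hardware. The hex values below are made-up
 * placeholders, not real capability words.
 *
 *	u16 ver = SDHCI_SPEC_300;
 *	u32 caps = 0x21e80080;
 *	u32 caps1 = 0x00000807;
 *
 *	__sdhci_read_caps(host, &ver, &caps, &caps1);
 */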
4101
4102 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
4103 {
4104 struct mmc_host *mmc = host->mmc;
4105 unsigned int max_blocks;
4106 unsigned int bounce_size;
4107 int ret;
4108
4109 /*
4110 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
4111 * has diminishing returns, probably because SD/MMC cards are
4112 * usually optimized to handle requests of this size.
4113 */
4114 bounce_size = SZ_64K;
4115 /*
4116 * Shrink the bounce buffer to the maximum request size if that
4117 * is smaller; otherwise the maximum request size is clamped down
4118 * to the bounce buffer size further below.
4119 */
4120 if (mmc->max_req_size < bounce_size)
4121 bounce_size = mmc->max_req_size;
4122 max_blocks = bounce_size / 512;
4123
4124 /*
4125 * When we support just one segment, we can get significant
4126 * speedups with the help of a bounce buffer that groups
4127 * scattered reads/writes together.
4128 */
4129 host->bounce_buffer = devm_kmalloc(mmc->parent,
4130 bounce_size,
4131 GFP_KERNEL);
4132 if (!host->bounce_buffer) {
4133 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
4134 mmc_hostname(mmc),
4135 bounce_size);
4136 /*
4137 * Returning early here makes sure we proceed with
4138 * mmc->max_segs == 1.
4139 */
4140 return;
4141 }
4142
4143 host->bounce_addr = dma_map_single(mmc->parent,
4144 host->bounce_buffer,
4145 bounce_size,
4146 DMA_BIDIRECTIONAL);
4147 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
4148 if (ret)
4149 /* Again fall back to max_segs == 1 */
4150 return;
4151 host->bounce_buffer_size = bounce_size;
4152
4153 /* Lie about this since we're bouncing */
4154 mmc->max_segs = max_blocks;
4155 mmc->max_seg_size = bounce_size;
4156 mmc->max_req_size = bounce_size;
4157
4158 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
4159 mmc_hostname(mmc), max_blocks, bounce_size);
4160 }
4161
4162 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
4163 {
4164 /*
4165 * According to the SD Host Controller spec v4.10, bit[27] of the
4166 * Capabilities Register (added in version 4.10) indicates 64-bit
4167 * System Address support for V4 mode.
4168 */
4169 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
4170 return host->caps & SDHCI_CAN_64BIT_V4;
4171
4172 return host->caps & SDHCI_CAN_64BIT;
4173 }
4174
4175 int sdhci_setup_host(struct sdhci_host *host)
4176 {
4177 struct mmc_host *mmc;
4178 u32 max_current_caps;
4179 unsigned int ocr_avail;
4180 unsigned int override_timeout_clk;
4181 u32 max_clk;
4182 int ret = 0;
4183 bool enable_vqmmc = false;
4184
4185 WARN_ON(host == NULL);
4186 if (host == NULL)
4187 return -EINVAL;
4188
4189 mmc = host->mmc;
4190
4191 /*
4192 * If there are external regulators, get them. Note this must be done
4193 * early before resetting the host and reading the capabilities so that
4194 * the host can take the appropriate action if regulators are not
4195 * available.
4196 */
4197 if (!mmc->supply.vqmmc) {
4198 ret = mmc_regulator_get_supply(mmc);
4199 if (ret)
4200 return ret;
4201 enable_vqmmc = true;
4202 }
4203
4204 DBG("Version: 0x%08x | Present: 0x%08x\n",
4205 sdhci_readw(host, SDHCI_HOST_VERSION),
4206 sdhci_readl(host, SDHCI_PRESENT_STATE));
4207 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
4208 sdhci_readl(host, SDHCI_CAPABILITIES),
4209 sdhci_readl(host, SDHCI_CAPABILITIES_1));
4210
4211 sdhci_read_caps(host);
4212
4213 override_timeout_clk = host->timeout_clk;
4214
4215 if (host->version > SDHCI_SPEC_420) {
4216 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
4217 mmc_hostname(mmc), host->version);
4218 }
4219
4220 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4221 host->flags |= SDHCI_USE_SDMA;
4222 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
4223 DBG("Controller doesn't have SDMA capability\n");
4224 else
4225 host->flags |= SDHCI_USE_SDMA;
4226
4227 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4228 (host->flags & SDHCI_USE_SDMA)) {
4229 DBG("Disabling DMA as it is marked broken\n");
4230 host->flags &= ~SDHCI_USE_SDMA;
4231 }
4232
4233 if ((host->version >= SDHCI_SPEC_200) &&
4234 (host->caps & SDHCI_CAN_DO_ADMA2))
4235 host->flags |= SDHCI_USE_ADMA;
4236
4237 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4238 (host->flags & SDHCI_USE_ADMA)) {
4239 DBG("Disabling ADMA as it is marked broken\n");
4240 host->flags &= ~SDHCI_USE_ADMA;
4241 }
4242
4243 if (sdhci_can_64bit_dma(host))
4244 host->flags |= SDHCI_USE_64_BIT_DMA;
4245
4246 if (host->use_external_dma) {
4247 ret = sdhci_external_dma_init(host);
4248 if (ret == -EPROBE_DEFER)
4249 goto unreg;
4250 /*
4251 * Fall back to use the DMA/PIO integrated in standard SDHCI
4252 * instead of external DMA devices.
4253 */
4254 else if (ret)
4255 sdhci_switch_external_dma(host, false);
4256 /* Disable internal DMA sources */
4257 else
4258 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4259 }
4260
4261 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4262 if (host->ops->set_dma_mask)
4263 ret = host->ops->set_dma_mask(host);
4264 else
4265 ret = sdhci_set_dma_mask(host);
4266
4267 if (!ret && host->ops->enable_dma)
4268 ret = host->ops->enable_dma(host);
4269
4270 if (ret) {
4271 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4272 mmc_hostname(mmc));
4273 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4274
4275 ret = 0;
4276 }
4277 }
4278
4279 /* SDMA does not support 64-bit DMA if v4 mode not set */
4280 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4281 host->flags &= ~SDHCI_USE_SDMA;
4282
4283 if (host->flags & SDHCI_USE_ADMA) {
4284 dma_addr_t dma;
4285 void *buf;
4286
4287 if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4288 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4289 else if (!host->alloc_desc_sz)
4290 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4291
4292 host->desc_sz = host->alloc_desc_sz;
4293 host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4294
4295 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4296 /*
4297 * Use zalloc to zero the reserved high 32-bits of 128-bit
4298 * descriptors so that they never need to be written.
4299 */
4300 buf = dma_alloc_coherent(mmc_dev(mmc),
4301 host->align_buffer_sz + host->adma_table_sz,
4302 &dma, GFP_KERNEL);
4303 if (!buf) {
4304 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4305 mmc_hostname(mmc));
4306 host->flags &= ~SDHCI_USE_ADMA;
4307 } else if ((dma + host->align_buffer_sz) &
4308 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4309 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4310 mmc_hostname(mmc));
4311 host->flags &= ~SDHCI_USE_ADMA;
4312 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4313 host->adma_table_sz, buf, dma);
4314 } else {
4315 host->align_buffer = buf;
4316 host->align_addr = dma;
4317
4318 host->adma_table = buf + host->align_buffer_sz;
4319 host->adma_addr = dma + host->align_buffer_sz;
4320 }
4321 }
4322
4323 /*
4324 * If we use DMA, then it's up to the caller to set the DMA
4325 * mask, but PIO does not need the hw shim so we set a new
4326 * mask here in that case.
4327 */
4328 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4329 host->dma_mask = DMA_BIT_MASK(64);
4330 mmc_dev(mmc)->dma_mask = &host->dma_mask;
4331 }
4332
4333 if (host->version >= SDHCI_SPEC_300)
4334 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
4335 else
4336 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
4337
4338 host->max_clk *= 1000000;
4339 if (host->max_clk == 0 || host->quirks &
4340 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4341 if (!host->ops->get_max_clock) {
4342 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4343 mmc_hostname(mmc));
4344 ret = -ENODEV;
4345 goto undma;
4346 }
4347 host->max_clk = host->ops->get_max_clock(host);
4348 }
4349
4350 /*
4351 * In case of Host Controller v3.00, find out whether clock
4352 * multiplier is supported.
4353 */
4354 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
4355
4356 /*
4357 * In case the value in Clock Multiplier is 0, then programmable
4358 * clock mode is not supported, otherwise the actual clock
4359 * multiplier is one more than the value of Clock Multiplier
4360 * in the Capabilities Register.
4361 */
4362 if (host->clk_mul)
4363 host->clk_mul += 1;
4364
4365 /*
4366 * Set host parameters.
4367 */
4368 max_clk = host->max_clk;
4369
4370 if (host->ops->get_min_clock)
4371 mmc->f_min = host->ops->get_min_clock(host);
4372 else if (host->version >= SDHCI_SPEC_300) {
4373 if (host->clk_mul)
4374 max_clk = host->max_clk * host->clk_mul;
4375 /*
4376 * Divided Clock Mode minimum clock rate is always less than
4377 * Programmable Clock Mode minimum clock rate.
4378 */
4379 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4380 } else
4381 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
4382
4383 if (!mmc->f_max || mmc->f_max > max_clk)
4384 mmc->f_max = max_clk;
4385
4386 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4387 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
4388
4389 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4390 host->timeout_clk *= 1000;
4391
4392 if (host->timeout_clk == 0) {
4393 if (!host->ops->get_timeout_clock) {
4394 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4395 mmc_hostname(mmc));
4396 ret = -ENODEV;
4397 goto undma;
4398 }
4399
4400 host->timeout_clk =
4401 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4402 1000);
4403 }
4404
4405 if (override_timeout_clk)
4406 host->timeout_clk = override_timeout_clk;
4407
4408 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4409 host->ops->get_max_timeout_count(host) : 1 << 27;
4410 mmc->max_busy_timeout /= host->timeout_clk;
4411 }
4412
4413 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4414 !host->ops->get_max_timeout_count)
4415 mmc->max_busy_timeout = 0;
4416
4417 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
4418 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4419
4420 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4421 host->flags |= SDHCI_AUTO_CMD12;
4422
4423 /*
4424 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
4425 * For v4 mode, SDMA may use Auto-CMD23 as well.
4426 */
4427 if ((host->version >= SDHCI_SPEC_300) &&
4428 ((host->flags & SDHCI_USE_ADMA) ||
4429 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4430 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4431 host->flags |= SDHCI_AUTO_CMD23;
4432 DBG("Auto-CMD23 available\n");
4433 } else {
4434 DBG("Auto-CMD23 unavailable\n");
4435 }
4436
4437 /*
4438 * A controller may support 8-bit width, but the board itself
4439 * might not have the pins brought out. Boards that support
4440 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4441 * their platform code before calling sdhci_add_host(), and we
4442 * won't assume 8-bit width for hosts without that CAP.
4443 */
4444 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4445 mmc->caps |= MMC_CAP_4_BIT_DATA;
4446
4447 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4448 mmc->caps &= ~MMC_CAP_CMD23;
4449
4450 if (host->caps & SDHCI_CAN_DO_HISPD)
4451 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4452
4453 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4454 mmc_card_is_removable(mmc) &&
4455 mmc_gpio_get_cd(host->mmc) < 0)
4456 mmc->caps |= MMC_CAP_NEEDS_POLL;
4457
4458 if (!IS_ERR(mmc->supply.vqmmc)) {
4459 if (enable_vqmmc) {
4460 ret = regulator_enable(mmc->supply.vqmmc);
4461 host->sdhci_core_to_disable_vqmmc = !ret;
4462 }
4463
4464 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
4465 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4466 1950000))
4467 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4468 SDHCI_SUPPORT_SDR50 |
4469 SDHCI_SUPPORT_DDR50);
4470
4471 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
4472 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4473 3600000))
4474 host->flags &= ~SDHCI_SIGNALING_330;
4475
4476 if (ret) {
4477 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4478 mmc_hostname(mmc), ret);
4479 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4480 }
4481
4482 }
4483
4484 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4485 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4486 SDHCI_SUPPORT_DDR50);
4487 /*
4488 * The SDHCI controller in a SoC might support HS200/HS400
4489 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4490 * but if the board is designed such that the IO lines are not
4491 * connected to 1.8V then HS200/HS400 cannot be supported.
4492 * Disable HS200/HS400 if the board does not have 1.8V connected
4493 * to the IO lines. (The same applies to the other 1.8V modes.)
4494 */
4495 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4496 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4497 }
4498
4499 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4500 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4501 SDHCI_SUPPORT_DDR50))
4502 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4503
4504 /* SDR104 support also implies SDR50 support */
4505 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4506 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4507 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
4508 * field can be promoted to support HS200.
4509 */
4510 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4511 mmc->caps2 |= MMC_CAP2_HS200;
4512 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4513 mmc->caps |= MMC_CAP_UHS_SDR50;
4514 }
4515
4516 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4517 (host->caps1 & SDHCI_SUPPORT_HS400))
4518 mmc->caps2 |= MMC_CAP2_HS400;
4519
4520 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4521 (IS_ERR(mmc->supply.vqmmc) ||
4522 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4523 1300000)))
4524 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4525
4526 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4527 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4528 mmc->caps |= MMC_CAP_UHS_DDR50;
4529
4530 /* Does the host need tuning for SDR50? */
4531 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4532 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4533
4534 /* Driver Type(s) (A, C, D) supported by the host */
4535 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4536 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4537 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4538 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4539 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4540 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4541
4542 /* Initial value for re-tuning timer count */
4543 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
4544 host->caps1);
4545
4546 /*
4547 * In case Re-tuning Timer is not disabled, the actual value of
4548 * re-tuning timer will be 2 ^ (n - 1).
4549 */
4550 if (host->tuning_count)
4551 host->tuning_count = 1 << (host->tuning_count - 1);
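	/*
	 * For example (illustrative): a raw timer count of 4 from the
	 * capabilities gives 1 << (4 - 1) = 8, i.e. per the spec roughly
	 * one re-tune every 8 seconds when a periodic re-tuning mode is
	 * in use.
	 */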
4552
4553 /* Re-tuning mode supported by the Host Controller */
4554 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
4555
4556 ocr_avail = 0;
4557
4558 /*
4559 * According to SD Host Controller spec v3.00, if the Host System
4560 * can afford more than 150mA, Host Driver should set XPC to 1. Also
4561 * the value is meaningful only if Voltage Support in the Capabilities
4562 * register is set. The actual current value is 4 times the register
4563 * value.
4564 */
4565 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4566 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4567 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4568 if (curr > 0) {
4569
4570 /* convert to SDHCI_MAX_CURRENT format */
4571 curr = curr/1000; /* convert to mA */
4572 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4573
4574 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4575 max_current_caps =
4576 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
4577 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
4578 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
4579 }
4580 }
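	/*
	 * For example (illustrative): a vmmc regulator reporting an
	 * 800000uA limit becomes 800mA, and dividing by the 4mA-per-step
	 * multiplier gives a register-format value of 200; the FIELD_GET()s
	 * below then turn that back into 800mA for mmc->max_current_*.
	 */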
4581
4582 if (host->caps & SDHCI_CAN_VDD_330) {
4583 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4584
4585 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
4586 max_current_caps) *
4587 SDHCI_MAX_CURRENT_MULTIPLIER;
4588 }
4589 if (host->caps & SDHCI_CAN_VDD_300) {
4590 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4591
4592 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
4593 max_current_caps) *
4594 SDHCI_MAX_CURRENT_MULTIPLIER;
4595 }
4596 if (host->caps & SDHCI_CAN_VDD_180) {
4597 ocr_avail |= MMC_VDD_165_195;
4598
4599 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
4600 max_current_caps) *
4601 SDHCI_MAX_CURRENT_MULTIPLIER;
4602 }
4603
4604 /* If OCR set by host, use it instead. */
4605 if (host->ocr_mask)
4606 ocr_avail = host->ocr_mask;
4607
4608 /* If OCR set by external regulators, give it highest prio. */
4609 if (mmc->ocr_avail)
4610 ocr_avail = mmc->ocr_avail;
4611
4612 mmc->ocr_avail = ocr_avail;
4613 mmc->ocr_avail_sdio = ocr_avail;
4614 if (host->ocr_avail_sdio)
4615 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4616 mmc->ocr_avail_sd = ocr_avail;
4617 if (host->ocr_avail_sd)
4618 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4619 else /* normal SD controllers don't support 1.8V */
4620 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4621 mmc->ocr_avail_mmc = ocr_avail;
4622 if (host->ocr_avail_mmc)
4623 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4624
4625 if (mmc->ocr_avail == 0) {
4626 pr_err("%s: Hardware doesn't report any support voltages.\n",
4627 mmc_hostname(mmc));
4628 ret = -ENODEV;
4629 goto unreg;
4630 }
4631
4632 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4633 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4634 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4635 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4636 host->flags |= SDHCI_SIGNALING_180;
4637
4638 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4639 host->flags |= SDHCI_SIGNALING_120;
4640
4641 spin_lock_init(&host->lock);
4642
4643 /*
4644 * Maximum number of sectors in one transfer. Limited by SDMA boundary
4645 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
4646 * is less anyway.
4647 */
4648 mmc->max_req_size = 524288;
4649
4650 /*
4651 * Maximum number of segments. Depends on if the hardware
4652 * can do scatter/gather or not.
4653 */
4654 if (host->flags & SDHCI_USE_ADMA) {
4655 mmc->max_segs = SDHCI_MAX_SEGS;
4656 } else if (host->flags & SDHCI_USE_SDMA) {
4657 mmc->max_segs = 1;
4658 if (swiotlb_max_segment()) {
4659 unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
4660 IO_TLB_SEGSIZE;
4661 mmc->max_req_size = min(mmc->max_req_size,
4662 max_req_size);
4663 }
4664 } else { /* PIO */
4665 mmc->max_segs = SDHCI_MAX_SEGS;
4666 }
4667
4668 /*
4669 * Maximum segment size. Could be one segment with the maximum number
4670 * of bytes. When doing hardware scatter/gather, each entry cannot
4671 * be larger than 64 KiB though.
4672 */
4673 if (host->flags & SDHCI_USE_ADMA) {
4674 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
4675 host->max_adma = 65532; /* 32-bit alignment */
4676 mmc->max_seg_size = 65535;
4677 } else {
4678 mmc->max_seg_size = 65536;
4679 }
4680 } else {
4681 mmc->max_seg_size = mmc->max_req_size;
4682 }
4683
4684 /*
4685 * Maximum block size. This varies from controller to controller and
4686 * is specified in the capabilities register.
4687 */
4688 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4689 mmc->max_blk_size = 2;
4690 } else {
4691 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4692 SDHCI_MAX_BLOCK_SHIFT;
4693 if (mmc->max_blk_size >= 3) {
4694 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4695 mmc_hostname(mmc));
4696 mmc->max_blk_size = 0;
4697 }
4698 }
4699
4700 mmc->max_blk_size = 512 << mmc->max_blk_size;
4701
4702 /*
4703 * Maximum block count.
4704 */
4705 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4706
4707 if (mmc->max_segs == 1)
4708 /* This may alter mmc->*_blk_* parameters */
4709 sdhci_allocate_bounce_buffer(host);
4710
4711 return 0;
4712
4713 unreg:
4714 if (host->sdhci_core_to_disable_vqmmc)
4715 regulator_disable(mmc->supply.vqmmc);
4716 undma:
4717 if (host->align_buffer)
4718 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4719 host->adma_table_sz, host->align_buffer,
4720 host->align_addr);
4721 host->adma_table = NULL;
4722 host->align_buffer = NULL;
4723
4724 return ret;
4725 }
4726 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4727
4728 void sdhci_cleanup_host(struct sdhci_host *host)
4729 {
4730 struct mmc_host *mmc = host->mmc;
4731
4732 if (host->sdhci_core_to_disable_vqmmc)
4733 regulator_disable(mmc->supply.vqmmc);
4734
4735 if (host->align_buffer)
4736 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4737 host->adma_table_sz, host->align_buffer,
4738 host->align_addr);
4739
4740 if (host->use_external_dma)
4741 sdhci_external_dma_release(host);
4742
4743 host->adma_table = NULL;
4744 host->align_buffer = NULL;
4745 }
4746 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
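/*
 * Illustrative sketch (not part of this driver): glue drivers that need to
 * adjust capabilities or enable resources between capability parsing and
 * registration split the probe into sdhci_setup_host(), their own fixups,
 * and __sdhci_add_host(), undoing the first step with sdhci_cleanup_host()
 * on failure. The foo_sdhci_fixup_caps() step is a hypothetical placeholder.
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *
 *	foo_sdhci_fixup_caps(host);
 *
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		goto err_cleanup;
 *
 *	return 0;
 *
 *  err_cleanup:
 *	sdhci_cleanup_host(host);
 *	return ret;
 */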
4747
4748 int __sdhci_add_host(struct sdhci_host *host)
4749 {
4750 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4751 struct mmc_host *mmc = host->mmc;
4752 int ret;
4753
4754 if ((mmc->caps2 & MMC_CAP2_CQE) &&
4755 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
4756 mmc->caps2 &= ~MMC_CAP2_CQE;
4757 mmc->cqe_ops = NULL;
4758 }
4759
4760 host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4761 if (!host->complete_wq)
4762 return -ENOMEM;
4763
4764 INIT_WORK(&host->complete_work, sdhci_complete_work);
4765
4766 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4767 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4768
4769 init_waitqueue_head(&host->buf_ready_int);
4770
4771 sdhci_init(host, 0);
4772
4773 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4774 IRQF_SHARED, mmc_hostname(mmc), host);
4775 if (ret) {
4776 pr_err("%s: Failed to request IRQ %d: %d\n",
4777 mmc_hostname(mmc), host->irq, ret);
4778 goto unwq;
4779 }
4780
4781 ret = sdhci_led_register(host);
4782 if (ret) {
4783 pr_err("%s: Failed to register LED device: %d\n",
4784 mmc_hostname(mmc), ret);
4785 goto unirq;
4786 }
4787
4788 ret = mmc_add_host(mmc);
4789 if (ret)
4790 goto unled;
4791
4792 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4793 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4794 host->use_external_dma ? "External DMA" :
4795 (host->flags & SDHCI_USE_ADMA) ?
4796 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4797 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4798
4799 sdhci_enable_card_detection(host);
4800
4801 return 0;
4802
4803 unled:
4804 sdhci_led_unregister(host);
4805 unirq:
4806 sdhci_do_reset(host, SDHCI_RESET_ALL);
4807 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4808 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4809 free_irq(host->irq, host);
4810 unwq:
4811 destroy_workqueue(host->complete_wq);
4812
4813 return ret;
4814 }
4815 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4816
4817 int sdhci_add_host(struct sdhci_host *host)
4818 {
4819 int ret;
4820
4821 ret = sdhci_setup_host(host);
4822 if (ret)
4823 return ret;
4824
4825 ret = __sdhci_add_host(host);
4826 if (ret)
4827 goto cleanup;
4828
4829 return 0;
4830
4831 cleanup:
4832 sdhci_cleanup_host(host);
4833
4834 return ret;
4835 }
4836 EXPORT_SYMBOL_GPL(sdhci_add_host);
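/*
 * Illustrative sketch (not part of this driver): the simplest platform glue
 * pairs sdhci_alloc_host()/sdhci_add_host() in probe with
 * sdhci_remove_host()/sdhci_free_host() in remove. The foo_* names are
 * hypothetical and error handling of the resources is simplified.
 *
 *	static int foo_sdhci_probe(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host;
 *		int ret;
 *
 *		host = sdhci_alloc_host(&pdev->dev, 0);
 *		if (IS_ERR(host))
 *			return PTR_ERR(host);
 *
 *		host->hw_name = "foo-sdhci";
 *		host->ops = &foo_sdhci_ops;
 *		host->irq = platform_get_irq(pdev, 0);
 *		host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
 *
 *		ret = sdhci_add_host(host);
 *		if (ret)
 *			sdhci_free_host(host);
 *		else
 *			platform_set_drvdata(pdev, host);
 *		return ret;
 *	}
 *
 *	static int foo_sdhci_remove(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host = platform_get_drvdata(pdev);
 *
 *		sdhci_remove_host(host, 0);
 *		sdhci_free_host(host);
 *		return 0;
 *	}
 */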
4837
4838 void sdhci_remove_host(struct sdhci_host *host, int dead)
4839 {
4840 struct mmc_host *mmc = host->mmc;
4841 unsigned long flags;
4842
4843 if (dead) {
4844 spin_lock_irqsave(&host->lock, flags);
4845
4846 host->flags |= SDHCI_DEVICE_DEAD;
4847
4848 if (sdhci_has_requests(host)) {
4849 pr_err("%s: Controller removed during "
4850 " transfer!\n", mmc_hostname(mmc));
4851 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4852 }
4853
4854 spin_unlock_irqrestore(&host->lock, flags);
4855 }
4856
4857 sdhci_disable_card_detection(host);
4858
4859 mmc_remove_host(mmc);
4860
4861 sdhci_led_unregister(host);
4862
4863 if (!dead)
4864 sdhci_do_reset(host, SDHCI_RESET_ALL);
4865
4866 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4867 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4868 free_irq(host->irq, host);
4869
4870 del_timer_sync(&host->timer);
4871 del_timer_sync(&host->data_timer);
4872
4873 destroy_workqueue(host->complete_wq);
4874
4875 if (host->sdhci_core_to_disable_vqmmc)
4876 regulator_disable(mmc->supply.vqmmc);
4877
4878 if (host->align_buffer)
4879 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4880 host->adma_table_sz, host->align_buffer,
4881 host->align_addr);
4882
4883 if (host->use_external_dma)
4884 sdhci_external_dma_release(host);
4885
4886 host->adma_table = NULL;
4887 host->align_buffer = NULL;
4888 }
4889
4890 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4891
4892 void sdhci_free_host(struct sdhci_host *host)
4893 {
4894 mmc_free_host(host->mmc);
4895 }
4896
4897 EXPORT_SYMBOL_GPL(sdhci_free_host);
4898
4899 /*****************************************************************************\
4900 * *
4901 * Driver init/exit *
4902 * *
4903 \*****************************************************************************/
4904
4905 static int __init sdhci_drv_init(void)
4906 {
4907 pr_info(DRIVER_NAME
4908 ": Secure Digital Host Controller Interface driver\n");
4909 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4910
4911 return 0;
4912 }
4913
4914 static void __exit sdhci_drv_exit(void)
4915 {
4916 }
4917
4918 module_init(sdhci_drv_init);
4919 module_exit(sdhci_drv_exit);
4920
4921 module_param(debug_quirks, uint, 0444);
4922 module_param(debug_quirks2, uint, 0444);
4923
4924 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4925 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4926 MODULE_LICENSE("GPL");
4927
4928 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4929 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
4930